diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi index bfe29023fbd511..07772510b7afb7 100644 --- a/arch/arm/boot/dts/stm32mp151.dtsi +++ b/arch/arm/boot/dts/stm32mp151.dtsi @@ -28,6 +28,14 @@ method = "smc"; }; + firmware { + optee: optee { + compatible = "linaro,optee-tz"; + method = "smc"; + status = "disabled"; + }; + }; + intc: interrupt-controller@a0021000 { compatible = "arm,cortex-a7-gic"; #interrupt-cells = <3>; diff --git a/arch/arm/boot/dts/stm32mp157c-dk2.dts b/arch/arm/boot/dts/stm32mp157c-dk2.dts index 045636555dddfc..2868dd8b4dd963 100644 --- a/arch/arm/boot/dts/stm32mp157c-dk2.dts +++ b/arch/arm/boot/dts/stm32mp157c-dk2.dts @@ -16,6 +16,13 @@ model = "STMicroelectronics STM32MP157C-DK2 Discovery Board"; compatible = "st,stm32mp157c-dk2", "st,stm32mp157"; + reserved-memory { + optee_memory: optee@0xde000000 { + reg = <0xde000000 0x02000000>; + no-map; + }; + }; + aliases { ethernet0 = ðernet0; serial0 = &uart4; @@ -95,3 +102,7 @@ pinctrl-2 = <&usart2_idle_pins_c>; status = "disabled"; }; + +&optee { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts index 85628e16d2d568..fa515206161f7c 100644 --- a/arch/arm/boot/dts/stm32mp157c-ev1.dts +++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts @@ -372,3 +372,7 @@ &usbphyc { status = "okay"; }; + +&optee { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi index 05ae893d1b2ee7..1754c8981b102c 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi @@ -22,11 +22,14 @@ aliases { serial0 = &v2m_serial0; - serial1 = &v2m_serial1; serial2 = &v2m_serial2; serial3 = &v2m_serial3; }; + ftpm { + compatible = "microsoft,ftpm"; + }; + cpus { #address-cells = <2>; #size-cells = <0>; @@ -67,6 +70,17 @@ <0x00000008 0x80000000 0 0x80000000>; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + optee@0x83000000 { + reg = <0x00000000 0x83000000 0 0x01000000>; + no-map; + }; + }; + timer { compatible = "arm,armv8-timer"; interrupts = , @@ -197,14 +211,6 @@ clock-names = "uartclk", "apb_pclk"; }; - v2m_serial1: serial@a0000 { - compatible = "arm,pl011", "arm,primecell"; - reg = <0x0a0000 0x1000>; - interrupts = <6>; - clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>; - clock-names = "uartclk", "apb_pclk"; - }; - v2m_serial2: serial@b0000 { compatible = "arm,pl011", "arm,primecell"; reg = <0x0b0000 0x1000>; @@ -228,4 +234,12 @@ }; }; }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; + }; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index f6c55877fbd94f..0ac89a86ae946c 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -800,6 +800,18 @@ <0x00000008 0x80000000 0x1 0x80000000>; }; + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* Shared memory between secure and non-secure world */ + optee@0xfee00000 { + reg = <0x00000000 0xfee00000 0 0x00200000>; + no-map; + }; + }; + bus@8000000 { compatible = "simple-bus"; #address-cells = <2>; @@ -837,4 +849,11 @@ interrupt-map-mask = <0 0>; interrupt-map = <0 0 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; }; + + firmware { + optee { + compatible = "linaro,optee-tz"; + method = "smc"; + }; + }; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts index 
7d370dac4c8571..3bb161655313c4 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
@@ -6,6 +6,7 @@
  */
 
 /dts-v1/;
+/memreserve/ 0x00000000 0x04080000;
 #include
 #include "hi3798cv200.dtsi"
 
@@ -70,6 +71,13 @@
 		gpio = <&gpio6 7 0>;
 		enable-active-high;
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
 
 &ehci {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 6dffada2e66b4c..e9ba9bf34143e0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -60,6 +60,13 @@
 		gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
 
 &cec {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 5e046f9d48ce91..13b40caafb2be5 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -468,15 +468,6 @@
 			reg = <0 0x10007000 0 0x100>;
 		};
 
-		timer: timer@10008000 {
-			compatible = "mediatek,mt8173-timer",
-				     "mediatek,mt6577-timer";
-			reg = <0 0x10008000 0 0x1000>;
-			interrupts = ;
-			clocks = <&infracfg CLK_INFRA_CLK_13M>,
-				 <&topckgen CLK_TOP_RTC_SEL>;
-		};
-
 		pwrap: pwrap@1000d000 {
 			compatible = "mediatek,mt8173-pwrap";
 			reg = <0 0x1000d000 0 0x1000>;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 989fe84a9f9d79..f3cfa1171a2bfe 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -25,3 +25,35 @@ config ION_CMA_HEAP
 	  Choose this option to enable CMA heaps with Ion. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
+
+config ION_UNMAPPED_HEAP
+	bool "ION unmapped heap support"
+	depends on ION && (ARM || ARM64)
+	help
+	  Choose this option to enable unmapped heaps with Ion. This heap is
+	  backed by specific memory pools carved out from Linux memory.
+	  Unless you know your system has these regions, you should say N here.
+
+config ION_DUMMY_UNMAPPED_HEAP
+	bool "ION dummy driver defines an ION unmapped heap"
+	depends on ION_UNMAPPED_HEAP
+	help
+	  The dummy ION driver will create an unmapped heap from the physical
+	  location provided through CONFIG_ION_DUMMY_UNMAPPED_BASE and
+	  CONFIG_ION_DUMMY_UNMAPPED_SIZE.
+
+config ION_DUMMY_UNMAPPED_BASE
+	hex "Physical base address of the ION unmapped heap"
+	depends on ION_DUMMY_UNMAPPED_HEAP
+	default 0
+	help
+	  Allows one to statically define an unmapped heap in the ION dummy
+	  driver to exercise unmapped heap buffer management.
+
+config ION_DUMMY_UNMAPPED_SIZE
+	hex "Physical byte size of the ION unmapped heap"
+	depends on ION_DUMMY_UNMAPPED_HEAP
+	default 0
+	help
+	  Allows one to statically define an unmapped heap in the ION dummy
+	  driver to exercise unmapped heap buffer management.
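The ION_HEAP_TYPE_UNMAPPED heap introduced by this series is reachable through the regular ION ioctls, so a carveout configured with the dummy options above can be exercised from user space. A minimal sketch, not part of the series, assuming the staging uapi header (drivers/staging/android/uapi/ion.h) is available as "ion.h"; the function name and the 16-entry query array are arbitrary choices:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "ion.h"	/* staging uapi header from this tree */

/* Returns a dma-buf fd backed by the unmapped (carveout) heap, or -1. */
static int alloc_from_unmapped_heap(uint64_t len)
{
	struct ion_heap_data heaps[16] = { 0 };
	struct ion_heap_query query = {
		.cnt = 16,
		.heaps = (uintptr_t)heaps,
	};
	struct ion_allocation_data alloc = { .len = len };
	unsigned int i;
	int fd, ret = -1;

	fd = open("/dev/ion", O_RDWR);
	if (fd < 0)
		return -1;

	/* Look up the heap id of the first ION_HEAP_TYPE_UNMAPPED heap */
	if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) == 0) {
		for (i = 0; i < query.cnt && i < 16; i++) {
			if (heaps[i].type != ION_HEAP_TYPE_UNMAPPED)
				continue;
			/* Allocate; on success alloc.fd is a dma-buf fd */
			alloc.heap_id_mask = 1u << heaps[i].heap_id;
			if (ioctl(fd, ION_IOC_ALLOC, &alloc) == 0)
				ret = alloc.fd;
			break;
		}
	}

	close(fd);
	return ret;
}

The returned dma-buf can then be handed to a TEE client; per the ion.c changes below, attaching and DMA-mapping such a buffer preserves the heap's physical addresses instead of remapping them.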
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile index 5f4487b1a22440..8cc78673661aea 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_ION) += ion.o ion_heap.o obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o +obj-$(CONFIG_ION_UNMAPPED_HEAP) += ion_unmapped_heap.o diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 3c9f09506ffa22..c007cfffc2c531 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -137,7 +137,8 @@ static void ion_buffer_kmap_put(struct ion_buffer *buffer) } } -static struct sg_table *dup_sg_table(struct sg_table *table) +static struct sg_table *dup_sg_table(struct sg_table *table, + bool preserve_dma_address) { struct sg_table *new_table; int ret, i; @@ -156,7 +157,8 @@ static struct sg_table *dup_sg_table(struct sg_table *table) new_sg = new_table->sgl; for_each_sgtable_sg(table, sg, i) { memcpy(new_sg, sg, sizeof(*sg)); - new_sg->dma_address = 0; + if (!preserve_dma_address) + new_sg->dma_address = 0; new_sg = sg_next(new_sg); } @@ -173,6 +175,7 @@ struct ion_dma_buf_attachment { struct device *dev; struct sg_table *table; struct list_head list; + bool no_map; }; static int ion_dma_buf_attach(struct dma_buf *dmabuf, @@ -186,7 +189,10 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, if (!a) return -ENOMEM; - table = dup_sg_table(buffer->sg_table); + if (buffer->heap->type == ION_HEAP_TYPE_UNMAPPED) + a->no_map = true; + + table = dup_sg_table(buffer->sg_table, a->no_map); if (IS_ERR(table)) { kfree(a); return -ENOMEM; @@ -228,6 +234,9 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, table = a->table; + if (a->no_map) + return table; + ret = dma_map_sgtable(attachment->dev, table, direction, 0); if (ret) return ERR_PTR(ret); @@ -239,6 +248,11 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table, enum dma_data_direction direction) { + struct ion_dma_buf_attachment *a = attachment->priv; + + if (a->no_map) + return; + dma_unmap_sgtable(attachment->dev, table, direction, 0); } diff --git a/drivers/staging/android/ion/ion_unmapped_heap.c b/drivers/staging/android/ion/ion_unmapped_heap.c new file mode 100644 index 00000000000000..28898360920190 --- /dev/null +++ b/drivers/staging/android/ion/ion_unmapped_heap.c @@ -0,0 +1,252 @@ +/* + * drivers/staging/android/ion/ion_unmapped_heap.c + * + * Copyright (C) 2016-2017 Linaro, Inc. + * Copyright (C) Allwinner 2014 + * Author: for Allwinner. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * ION heap type for handling physical memory heap not mapped + * in the linux-based OS. + * + * "unmapped heap" buffers are default not mapped but buffer owner + * can explicitly request mapping for some specific purpose. + * + * Based on Allwinner work (allocation thru gen_pool) and + * HiSilicon work (create ION heaps from DT nodes, + * Author: Chen Feng ). 
+ */ + +#include +#include +#include +#include + +#include "ion.h" + +/* + * TODO: non-contigous unammped heaps: + * - add a flag to specify contiguity constraint? + * - define antoher heap type that allocate to the smae pool(s)? + */ + +struct ion_unmapped_heap { + struct ion_heap heap; + struct gen_pool *pool; + phys_addr_t base; + size_t size; +}; + +struct unmapped_buffer_priv { + phys_addr_t base; +}; + +static phys_addr_t get_buffer_base(struct unmapped_buffer_priv *priv) +{ + return priv->base; +} + +static struct device *heap2dev(struct ion_heap *heap) +{ + return heap->dev->dev.this_device; +} + +static phys_addr_t ion_unmapped_allocate(struct ion_heap *heap, + unsigned long size, + phys_addr_t *addr) +{ + struct ion_unmapped_heap *umh = + container_of(heap, struct ion_unmapped_heap, heap); + unsigned long offset = gen_pool_alloc(umh->pool, size); + + if (!offset) { + dev_err(heap2dev(heap), + "%s(%d) err: alloc 0x%08x bytes failed\n", + __func__, __LINE__, (u32)size); + return false; + } + + *addr = offset; + return true; +} + +static void ion_unmapped_free(struct ion_heap *heap, phys_addr_t addr, + unsigned long size) +{ + struct ion_unmapped_heap *umh = + container_of(heap, struct ion_unmapped_heap, heap); + + gen_pool_free(umh->pool, addr, size); +} + +static struct sg_table *ion_unmapped_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct sg_table *table; + int ret; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return ERR_PTR(-ENOMEM); + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) { + kfree(table); + return ERR_PTR(ret); + } + sg_set_page(table->sgl, + phys_to_page(get_buffer_base(buffer->priv_virt)), + buffer->size, 0); + + return table; +} + +void ion_unmapped_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + sg_free_table(buffer->sg_table); + kfree(buffer->sg_table); +} + +static int ion_unmapped_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, + unsigned long flags) +{ + struct unmapped_buffer_priv *priv; + phys_addr_t base; + int rc = -EINVAL; + + if (!ion_unmapped_allocate(heap, size, &base)) + return -ENOMEM; + + priv = devm_kzalloc(heap2dev(heap), sizeof(*priv), GFP_KERNEL); + if (IS_ERR_OR_NULL(priv)) { + rc = -ENOMEM; + goto err; + } + + priv->base = base; + buffer->size = roundup(size, PAGE_SIZE); + buffer->priv_virt = priv; + + buffer->sg_table = ion_unmapped_heap_map_dma(heap, buffer); + if (!buffer->sg_table) { + rc = -ENOMEM; + goto err; + } + sg_dma_address(buffer->sg_table->sgl) = priv->base; + sg_dma_len(buffer->sg_table->sgl) = size; + return 0; +err: + ion_unmapped_free(heap, base, size); + devm_kfree(heap2dev(heap), priv); + buffer->priv_virt = NULL; + return rc; +} + +static void ion_unmapped_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + + ion_unmapped_heap_unmap_dma(heap, buffer); + ion_unmapped_free(heap, get_buffer_base(buffer->priv_virt), + buffer->size); + devm_kfree(heap2dev(heap), buffer->priv_virt); + buffer->priv_virt = NULL; +} + +static int ion_unmapped_heap_map_user(struct ion_heap *heap, + struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + phys_addr_t pa = get_buffer_base(buffer->priv_virt); + + /* + * when user call ION_IOC_ALLOC not with ION_FLAG_CACHED, ion_mmap will + * change vma->vm_page_prot to pgprot_writecombine itself, so we do not + * need change to pgprot_writecombine here manually. 
+ */ + return remap_pfn_range(vma, vma->vm_start, + __phys_to_pfn(pa) + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static struct ion_heap_ops unmapped_heap_ops = { + .allocate = ion_unmapped_heap_allocate, + .free = ion_unmapped_heap_free, + .map_user = ion_unmapped_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_unmapped_heap_create(phys_addr_t base, size_t size) +{ + struct ion_unmapped_heap *umh; + + umh = kzalloc(sizeof(struct ion_unmapped_heap), GFP_KERNEL); + if (!umh) + return ERR_PTR(-ENOMEM); + + umh->pool = gen_pool_create(PAGE_SHIFT, -1); + if (!umh->pool) { + kfree(umh); + return ERR_PTR(-ENOMEM); + } + umh->base = base; + umh->size = size; + + gen_pool_add(umh->pool, umh->base, size, -1); + umh->heap.ops = &unmapped_heap_ops; + umh->heap.type = ION_HEAP_TYPE_UNMAPPED; + + return &umh->heap; +} +EXPORT_SYMBOL(ion_unmapped_heap_create); + +void ion_unmapped_heap_destroy(struct ion_heap *heap) +{ + struct ion_unmapped_heap *umh = + container_of(heap, struct ion_unmapped_heap, heap); + + gen_pool_destroy(umh->pool); + kfree(umh); + umh = NULL; +} +EXPORT_SYMBOL(ion_unmapped_heap_destroy); + +#if defined(CONFIG_ION_DUMMY_UNMAPPED_HEAP) && CONFIG_ION_DUMMY_UNMAPPED_SIZE +#define DUMMY_UNAMMPED_HEAP_NAME "unmapped_contiguous" + +static int ion_add_dummy_unmapped_heaps(void) +{ + struct ion_heap *heap; + const char name[] = DUMMY_UNAMMPED_HEAP_NAME; + + heap = ion_unmapped_heap_create(CONFIG_ION_DUMMY_UNMAPPED_BASE, + CONFIG_ION_DUMMY_UNMAPPED_SIZE); + if (IS_ERR(heap)) + return PTR_ERR(heap); + + heap->name = kzalloc(sizeof(name), GFP_KERNEL); + if (IS_ERR(heap->name)) { + kfree(heap); + return PTR_ERR(heap->name); + } + memcpy((char *)heap->name, name, sizeof(name)); + + ion_device_add_heap(heap); + return 0; +} +device_initcall(ion_add_dummy_unmapped_heaps); +#endif diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h index 46c93fcb46d6e6..32862f2623d397 100644 --- a/drivers/staging/android/uapi/ion.h +++ b/drivers/staging/android/uapi/ion.h @@ -19,6 +19,8 @@ * carveout heap, allocations are physically * contiguous * @ION_HEAP_TYPE_DMA: memory allocated via DMA API + * @ION_HEAP_TYPE_UNMAPPED: memory not intended to be mapped into the + * linux address space unless for debug cases * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask * is used to identify the heaps, so only 32 * total heap types are supported @@ -29,6 +31,7 @@ enum ion_heap_type { ION_HEAP_TYPE_CARVEOUT, ION_HEAP_TYPE_CHUNK, ION_HEAP_TYPE_DMA, + ION_HEAP_TYPE_UNMAPPED, ION_HEAP_TYPE_CUSTOM, /* * must be last so device specific heaps always * are at the end of this enum diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h index d7f798c3394bc7..a837676d1866ac 100644 --- a/drivers/tee/amdtee/amdtee_private.h +++ b/drivers/tee/amdtee/amdtee_private.h @@ -122,13 +122,17 @@ static inline u32 get_session_index(u32 session) int amdtee_open_session(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, - struct tee_param *param); + struct tee_param *normal_param, + u32 num_normal_params, + struct tee_param *ocall_param); int amdtee_close_session(struct tee_context *ctx, u32 session); int amdtee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, - struct tee_param *param); + struct tee_param *normal_param, + u32 num_normal_params, + struct tee_param *ocall_param); int amdtee_cancel_req(struct tee_context *ctx, u32 
cancel_id, u32 session); diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c index 27b4cd77d0db6f..4ff958b1b931a1 100644 --- a/drivers/tee/amdtee/core.c +++ b/drivers/tee/amdtee/core.c @@ -230,7 +230,9 @@ static void destroy_session(struct kref *ref) int amdtee_open_session(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, - struct tee_param *param) + struct tee_param *normal_param, + u32 num_normal_params, + struct tee_param *ocall_param) { struct amdtee_context_data *ctxdata = ctx->data; struct amdtee_session *sess = NULL; @@ -239,6 +241,11 @@ int amdtee_open_session(struct tee_context *ctx, int rc, i; void *ta; + if (ocall_param) { + pr_err("OCALLs not supported\n"); + return -EOPNOTSUPP; + } + if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) { pr_err("unsupported client login method\n"); return -EINVAL; @@ -279,7 +286,7 @@ int amdtee_open_session(struct tee_context *ctx, } /* Open session with loaded TA */ - handle_open_session(arg, &session_info, param); + handle_open_session(arg, &session_info, normal_param); if (arg->ret != TEEC_SUCCESS) { pr_err("open_session failed %d\n", arg->ret); spin_lock(&sess->lock); @@ -391,12 +398,19 @@ void amdtee_unmap_shmem(struct tee_shm *shm) int amdtee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, - struct tee_param *param) + struct tee_param *normal_param, + u32 num_normal_params, + struct tee_param *ocall_param) { struct amdtee_context_data *ctxdata = ctx->data; struct amdtee_session *sess; u32 i, session_info; + if (ocall_param) { + pr_err("OCALLs not supported\n"); + return -EOPNOTSUPP; + } + /* Check that the session is valid */ mutex_lock(&session_list_mutex); sess = find_session(ctxdata, arg->session); @@ -409,7 +423,7 @@ int amdtee_invoke_func(struct tee_context *ctx, if (!sess) return -EINVAL; - handle_invoke_cmd(arg, session_info, param); + handle_invoke_cmd(arg, session_info, normal_param); return 0; } @@ -422,6 +436,7 @@ int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) static const struct tee_driver_ops amdtee_ops = { .get_version = amdtee_get_version, .open = amdtee_open, + .pre_release = NULL, .release = amdtee_release, .open_session = amdtee_open_session, .close_session = amdtee_close_session, diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index 3ca71e3812ed4b..be57a62d5c6e57 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -15,3 +15,10 @@ config OPTEE_SHM_NUM_PRIV_PAGES help This sets the number of private shared memory pages to be used by OP-TEE TEE driver. + +config OPTEE_BENCHMARK + bool "OP-TEE Benchmark (EXPERIMENTAL)" + depends on OPTEE + help + This enables benchmarking feature in the OP-TEE Trusted + Execution Environment (TEE) driver. 
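With OPTEE_BENCHMARK enabled, the driver code added below only appends timestamps to per-CPU ring buffers shared with a benchmark client; draining them is left to that client. A rough consumer-side sketch, assuming the shared buffer is already mapped; the structures mirror the layout declared in optee_bench.h later in this series, while the function name and output format are made up:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout declared in optee_bench.h */
#define OPTEE_BENCH_MAX_STAMPS	32
#define OPTEE_BENCH_MAX_MASK	(OPTEE_BENCH_MAX_STAMPS - 1)

struct optee_time_st {
	uint64_t cnt;	/* cycle counter value */
	uint64_t addr;	/* program counter of the caller */
	uint64_t src;	/* OP-TEE subsystem id */
};

struct optee_ts_cpu_buf {
	uint64_t head;
	uint64_t tail;
	struct optee_time_st stamps[OPTEE_BENCH_MAX_STAMPS];
};

struct optee_ts_global {
	uint64_t cores;
	struct optee_ts_cpu_buf cpu_buf[];
};

/* Print the timestamps still retained in one CPU's ring buffer. */
static void dump_cpu_stamps(const struct optee_ts_global *g, uint64_t cpu)
{
	const struct optee_ts_cpu_buf *buf;
	uint64_t first, i;

	if (cpu >= g->cores)
		return;

	buf = &g->cpu_buf[cpu];

	/* Only the newest OPTEE_BENCH_MAX_STAMPS entries survive the wrap. */
	first = buf->head > OPTEE_BENCH_MAX_STAMPS ?
		buf->head - OPTEE_BENCH_MAX_STAMPS : 0;

	for (i = first; i < buf->head; i++) {
		const struct optee_time_st *ts =
			&buf->stamps[i & OPTEE_BENCH_MAX_MASK];

		printf("cpu%llu: cycles=%llu pc=0x%llx src=0x%llx\n",
		       (unsigned long long)cpu,
		       (unsigned long long)ts->cnt,
		       (unsigned long long)ts->addr,
		       (unsigned long long)ts->src);
	}
}

The kernel side multiplies the raw CCNT value by OPTEE_BENCH_DIVIDER before storing it, so ts->cnt above is already expressed in CPU cycles.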
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile index 56263ae3b1d7a3..725ac111208d3b 100644 --- a/drivers/tee/optee/Makefile +++ b/drivers/tee/optee/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_OPTEE) += optee.o optee-objs += core.o +optee-objs += call_queue.o optee-objs += call.o optee-objs += rpc.o optee-objs += supp.o optee-objs += shm_pool.o optee-objs += device.o +optee-objs += bench.o diff --git a/drivers/tee/optee/bench.c b/drivers/tee/optee/bench.c new file mode 100644 index 00000000000000..9e73b2fb54adc1 --- /dev/null +++ b/drivers/tee/optee/bench.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2017, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include "optee_bench.h" + +/* + * Specific defines for ARM performance timers + */ +/* aarch32 */ +#define OPTEE_BENCH_DEF_OPTS (1 | 16) +#define OPTEE_BENCH_DEF_OVER 0x8000000f +/* enable 64 divider for CCNT */ +#define OPTEE_BENCH_DIVIDER_OPTS (OPTEE_BENCH_DEF_OPTS | 8) + +/* aarch64 */ +#define OPTEE_BENCH_ARMV8_PMCR_MASK 0x3f +#define OPTEE_BENCH_ARMV8_PMCR_E (1 << 0) /* Enable all counters */ +#define OPTEE_BENCH_ARMV8_PMCR_P (1 << 1) /* Reset all counters */ +#define OPTEE_BENCH_ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */ +#define OPTEE_BENCH_ARMV8_PMCR_D (1 << 3) /* 64 divider */ + +#define OPTEE_BENCH_ARMV8_PMUSERENR_EL0 (1 << 0) /* EL0 access enable */ +#define OPTEE_BENCH_ARMV8_PMUSERENR_CR (1 << 2) /* CCNT read enable */ + +struct optee_ts_global *optee_bench_ts_global; +struct rw_semaphore optee_bench_ts_rwsem; + +#ifdef CONFIG_OPTEE_BENCHMARK +static inline u32 armv8pmu_pmcr_read(void) +{ + u32 val = 0; + + asm volatile("mrs %0, pmcr_el0" : "=r"(val)); + + return (u32)val; +} + +static inline void armv8pmu_pmcr_write(u32 val) +{ + val &= OPTEE_BENCH_ARMV8_PMCR_MASK; + asm volatile("msr pmcr_el0, %0" :: "r"((u64)val)); +} + +static inline u64 read_ccounter(void) +{ + u64 ccounter; + +#ifdef __aarch64__ + asm volatile("mrs %0, PMCCNTR_EL0" : "=r"(ccounter)); +#else + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(ccounter)); +#endif + + return ccounter * OPTEE_BENCH_DIVIDER; +} + +static void optee_pmu_setup(void *data) +{ +#ifdef __aarch64__ + /* Enable EL0 access to PMU counters. 
*/ + asm volatile("msr pmuserenr_el0, %0" :: "r"((u64) + OPTEE_BENCH_ARMV8_PMUSERENR_EL0 | + OPTEE_BENCH_ARMV8_PMUSERENR_CR)); + /* Enable PMU counters */ + armv8pmu_pmcr_write(OPTEE_BENCH_ARMV8_PMCR_P | + OPTEE_BENCH_ARMV8_PMCR_C | + OPTEE_BENCH_ARMV8_PMCR_D); + asm volatile("msr pmcntenset_el0, %0" :: "r"((u64)(1 << 31))); + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | + OPTEE_BENCH_ARMV8_PMCR_E); +#else + /* Enable EL0 access to PMU counters */ + asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1)); + /* Enable all PMU counters */ + asm volatile("mcr p15, 0, %0, c9, c12, 0" :: "r" + (OPTEE_BENCH_DIVIDER_OPTS)); + /* Disable counter overflow interrupts */ + asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(OPTEE_BENCH_DEF_OVER)); +#endif +} + +static void optee_pmu_disable(void *data) +{ +#ifdef __aarch64__ + /* Disable EL0 access */ + asm volatile("msr pmuserenr_el0, %0" :: "r"((u64)0)); + /* Disable PMU counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ~OPTEE_BENCH_ARMV8_PMCR_E); +#else + /* Disable all PMU counters */ + asm volatile("mcr p15, 0, %0, c9, c12, 0" :: "r"(0)); + /* Enable counter overflow interrupts */ + asm volatile("mcr p15, 0, %0, c9, c12, 2" :: "r"(OPTEE_BENCH_DEF_OVER)); + /* Disable EL0 access to PMU counters. */ + asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(0)); +#endif +} + +void optee_bm_enable(void) +{ + on_each_cpu(optee_pmu_setup, NULL, 1); +} + +void optee_bm_disable(void) +{ + on_each_cpu(optee_pmu_disable, NULL, 1); +} + +void optee_bm_timestamp(void) +{ + struct optee_ts_cpu_buf *cpu_buf; + struct optee_time_st ts_data; + uint64_t ts_i; + void *ret_addr; + int cur_cpu = 0; + int ret; + + down_read(&optee_bench_ts_rwsem); + + if (!optee_bench_ts_global) { + up_read(&optee_bench_ts_rwsem); + return; + } + + cur_cpu = get_cpu(); + + if (cur_cpu >= optee_bench_ts_global->cores) { + put_cpu(); + up_read(&optee_bench_ts_rwsem); + return; + } + + ret_addr = __builtin_return_address(0); + + cpu_buf = &optee_bench_ts_global->cpu_buf[cur_cpu]; + ts_i = __sync_fetch_and_add(&cpu_buf->head, 1); + ts_data.cnt = read_ccounter(); + ts_data.addr = (uintptr_t)ret_addr; + ts_data.src = OPTEE_BENCH_KMOD; + cpu_buf->stamps[ts_i & OPTEE_BENCH_MAX_MASK] = ts_data; + + up_read(&optee_bench_ts_rwsem); + + put_cpu(); +} +#endif /* CONFIG_OPTEE_BENCHMARK */ diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index 20b6fd7383c54b..499ebbed3387c6 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -13,163 +13,479 @@ #include #include "optee_private.h" #include "optee_smc.h" +#include "optee_bench.h" -struct optee_call_waiter { - struct list_head list_node; - struct completion c; -}; - -static void optee_cq_wait_init(struct optee_call_queue *cq, - struct optee_call_waiter *w) +/* Requires the filpstate mutex to be held */ +static struct optee_session *find_session(struct optee_context_data *ctxdata, + u32 session_id) { - /* - * We're preparing to make a call to secure world. In case we can't - * allocate a thread in secure world we'll end up waiting in - * optee_cq_wait_for_completion(). - * - * Normally if there's no contention in secure world the call will - * complete and we can cleanup directly with optee_cq_wait_final(). - */ - mutex_lock(&cq->mutex); + struct optee_session *sess; - /* - * We add ourselves to the queue, but we don't wait. This - * guarantees that we don't lose a completion if secure world - * returns busy and another thread just exited and try to complete - * someone. 
- */ - init_completion(&w->c); - list_add_tail(&w->list_node, &cq->waiters); + list_for_each_entry(sess, &ctxdata->sess_list, list_node) + if (sess->session_id == session_id) + return sess; - mutex_unlock(&cq->mutex); + return NULL; } -static void optee_cq_wait_for_completion(struct optee_call_queue *cq, - struct optee_call_waiter *w) +static void param_clear_ocall(struct tee_param *ocall) { - wait_for_completion(&w->c); + if (ocall) + memset(&ocall->u, 0, sizeof(ocall->u)); +} - mutex_lock(&cq->mutex); +static u64 param_get_ocall_func(struct tee_param *param) +{ + return TEE_IOCTL_OCALL_GET_FUNC(param->u.value.a); +} - /* Move to end of list to get out of the way for other waiters */ - list_del(&w->list_node); - reinit_completion(&w->c); - list_add_tail(&w->list_node, &cq->waiters); +/* Requires @sem in the parent struct optee_session to be held */ +static int verify_ocall_request(u32 num_params, struct optee_call_ctx *call_ctx) +{ + size_t n; + size_t sz; + struct optee_msg_arg *arg = call_ctx->rpc_arg; + + switch (arg->cmd) { + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + case OPTEE_MSG_RPC_CMD_SHM_FREE: + if (!num_params) + return -EINVAL; + + /* + * This parameter either carries the requested allocation size + * or a pointer to the SHM to be freed. + */ + if (!arg->num_params || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + return -EINVAL; + + /* + * Ensure that we won't read past the end of the SHM underlying + * arg->params. + */ + sz = sizeof(*arg) + sizeof(*arg->params) * arg->num_params; + if (sz > call_ctx->rpc_shm->size) + return -EINVAL; + + /* The remaining parameters are unused */ + for (n = 1; n < arg->num_params; n++) + if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) + return -EINVAL; + break; + case OPTEE_MSG_RPC_CMD_OCALL: + /* 'num_params' is checked later */ + + /* These parameters carry the OCALL descriptors */ + if (arg->num_params < 2 || + arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INOUT || + arg->params[1].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT || + arg->params[0].u.value.a > U32_MAX || /* OCALL Cmd Id */ + arg->params[1].u.value.c != 0) /* TA UUID (128 bytes) */ + return -EINVAL; + break; + default: + return -EINVAL; + } - mutex_unlock(&cq->mutex); + return 0; } -static void optee_cq_complete_one(struct optee_call_queue *cq) +/* Requires @sem in the parent struct optee_session to be held */ +static int verify_ocall_reply(u64 func, struct tee_param *params, + u32 num_params, struct optee_call_ctx *call_ctx) { - struct optee_call_waiter *w; + size_t n; + + switch (func) { + case TEE_IOCTL_OCALL_CMD_SHM_ALLOC: + if (call_ctx->rpc_arg->cmd != OPTEE_MSG_RPC_CMD_SHM_ALLOC || + !num_params) + return -EINVAL; + + /* This parameter carries the allocated SHM ID */ + if (params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT || + params[0].u.value.c > INT_MAX) + return -EINVAL; + break; + case TEE_IOCTL_OCALL_CMD_SHM_FREE: + if (call_ctx->rpc_arg->cmd != OPTEE_MSG_RPC_CMD_SHM_FREE || + !num_params) + return -EINVAL; + + /* Sanity check, not used while processing the reply */ + if (params[0].attr != TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT) + return -EINVAL; + break; + case TEE_IOCTL_OCALL_CMD_INVOKE: + if (call_ctx->rpc_arg->cmd != OPTEE_MSG_RPC_CMD_OCALL) + return -EINVAL; + + /* Skip the loop below */ + return 0; + default: + return -EINVAL; + } + + /* The remaining parameters are unused */ + for (n = 1; n < num_params; n++) + if (params[n].attr != TEE_IOCTL_PARAM_ATTR_TYPE_NONE) + return -EINVAL; + + return 0; +} - list_for_each_entry(w, &cq->waiters, 
list_node) { - if (!completion_done(&w->c)) { - complete(&w->c); +/* Requires @sem in the parent struct optee_session to be held */ +static void process_ocall_memrefs(struct optee_msg_param *params, + u32 num_params, bool increment) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_shm *shm; + const struct optee_msg_param *mp = params + n; + u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK; + + switch (attr) { + case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT: + shm = (struct tee_shm *)(uintptr_t)mp->u.rmem.shm_ref; + break; + default: + shm = NULL; break; } + + if (!shm) + continue; + + if (increment) + tee_shm_get(shm); + else + tee_shm_put(shm); } } -static void optee_cq_wait_final(struct optee_call_queue *cq, - struct optee_call_waiter *w) +/* + * Requires @sem in the parent struct optee_session to be held (if OCALLs are + * expected) + */ +static void call_prologue(struct optee_call_ctx *call_ctx) { + struct optee *optee = tee_get_drvdata(call_ctx->ctx->teedev); + + /* Initialize waiter */ + optee_cq_wait_init(&optee->call_queue, &call_ctx->waiter); +} + +/* + * Requires @sem in the parent struct optee_session to be held (if OCALLs are + * expected) + */ +static void call_epilogue(struct optee_call_ctx *call_ctx) +{ + struct optee *optee = tee_get_drvdata(call_ctx->ctx->teedev); + + optee_rpc_finalize_call(call_ctx); + /* - * We're done with the call to secure world. The thread in secure - * world that was used for this call is now available for some - * other task to use. + * We're done with our thread in secure world, if there's any + * thread waiters wake up one. */ - mutex_lock(&cq->mutex); + optee_cq_wait_final(&optee->call_queue, &call_ctx->waiter); +} + +/* Requires @sem in the parent struct optee_session to be held */ +static int process_ocall_request(struct tee_param *params, u32 num_params, + struct tee_param *ocall, + struct optee_call_ctx *call_ctx) +{ + u32 cmd_id; + struct tee_shm *shm; + size_t shm_sz; + struct optee_msg_param *msg_param; + u32 msg_num_params; + int rc = 0; - /* Get out of the list */ - list_del(&w->list_node); + /* + * Points to the octets of the UUID corresponding to the TA requesting + * the OCALL, if applicable for this call. + */ + void *clnt_id; - /* Wake up one eventual waiting task */ - optee_cq_complete_one(cq); + rc = verify_ocall_request(num_params, call_ctx); + if (rc) + goto exit_set_ret; /* - * If we're completed we've got a completion from another task that - * was just done with its call to secure world. Since yet another - * thread now is available in secure world wake up another eventual - * waiting task. + * Clear out the parameters of the original function invocation. The + * original contents are backed up in call_ctx->msg_arg and will be + * restored elsewhere once the OCALL is over. 
*/ - if (completion_done(&w->c)) - optee_cq_complete_one(cq); + memset(params, 0, num_params * sizeof(*params)); + + /* Set up the OCALL request */ + switch (call_ctx->rpc_arg->cmd) { + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + ocall->u.value.a = + TEE_IOCTL_OCALL_MAKE_PAIR(TEE_IOCTL_OCALL_CMD_SHM_ALLOC, + 0); + + shm_sz = call_ctx->rpc_arg->params[0].u.value.b; + params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; + params[0].u.value.a = 0; + params[0].u.value.b = shm_sz; + params[0].u.value.c = 0; + break; + case OPTEE_MSG_RPC_CMD_SHM_FREE: + ocall->u.value.a = + TEE_IOCTL_OCALL_MAKE_PAIR(TEE_IOCTL_OCALL_CMD_SHM_FREE, + 0); + + shm = (struct tee_shm *)(uintptr_t) + call_ctx->rpc_arg->params[0].u.value.b; + params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT; + params[0].u.value.a = 0; + params[0].u.value.b = 0; + params[0].u.value.c = tee_shm_get_id(shm); + break; + case OPTEE_MSG_RPC_CMD_OCALL: + /* -2 here and +2 below to skip the OCALL descriptors */ + msg_num_params = call_ctx->rpc_arg->num_params - 2; + if (num_params < msg_num_params) { + rc = -EINVAL; + goto exit_set_ret; + } + + msg_param = call_ctx->rpc_arg->params + 2; + rc = optee_from_msg_param(params, msg_num_params, msg_param); + if (rc) + goto exit_set_ret; + + process_ocall_memrefs(msg_param, msg_num_params, true); + call_ctx->rpc_must_release = true; + + cmd_id = (u32)call_ctx->rpc_arg->params[0].u.value.a; + ocall->u.value.a = + TEE_IOCTL_OCALL_MAKE_PAIR(TEE_IOCTL_OCALL_CMD_INVOKE, + cmd_id); + + clnt_id = &call_ctx->rpc_arg->params[1].u.value; + memcpy(&ocall->u.value.b, clnt_id, TEE_IOCTL_UUID_LEN); + break; + default: + /* NOT REACHED */ + rc = -EINVAL; + goto exit_set_ret; + } + + return rc; - mutex_unlock(&cq->mutex); +exit_set_ret: + call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + return rc; } -/* Requires the filpstate mutex to be held */ -static struct optee_session *find_session(struct optee_context_data *ctxdata, - u32 session_id) +/* Requires @sem in the parent struct optee_session to be held */ +static int process_ocall_reply(u32 ret, u32 ret_origin, + struct tee_param *params, u32 num_params, + struct tee_param *ocall, + struct optee_call_ctx *call_ctx) { - struct optee_session *sess; + const u64 func = param_get_ocall_func(ocall); + struct tee_shm *shm; + void *shm_pages_list; + struct optee_msg_param *msg_param; + u32 msg_num_params; + int rc = 0; - list_for_each_entry(sess, &ctxdata->sess_list, list_node) - if (sess->session_id == session_id) - return sess; + rc = verify_ocall_reply(func, params, num_params, call_ctx); + if (rc) + goto exit_set_ret; - return NULL; + switch (func) { + case TEE_IOCTL_OCALL_CMD_SHM_ALLOC: + if (ret != TEEC_SUCCESS) + goto exit_propagate_ret; + + if (call_ctx->ocall_pages_list) + goto exit_set_ret; + + shm = tee_shm_get_from_id(call_ctx->ctx, + (int)params[0].u.value.c); + if (IS_ERR(shm)) { + rc = PTR_ERR(shm); + goto exit_set_ret; + } + + rc = optee_rpc_process_shm_alloc(shm, call_ctx->rpc_arg->params, + &shm_pages_list); + + /* The CA holds a reference */ + tee_shm_put(shm); + + if (rc) + goto exit_set_ret; + + /* Could be NULL and zero, respectively */ + call_ctx->ocall_pages_list = shm_pages_list; + call_ctx->ocall_num_entries = shm->num_pages; + break; + case TEE_IOCTL_OCALL_CMD_SHM_FREE: + /* OP-TEE ignores ret and ret_origin for an SHM_FREE RPC */ + if (ret != TEEC_SUCCESS) + goto exit_propagate_ret; + + if (call_ctx->ocall_pages_list) { + optee_free_pages_list(call_ctx->ocall_pages_list, + 
call_ctx->ocall_num_entries); + call_ctx->ocall_pages_list = NULL; + call_ctx->ocall_num_entries = 0; + } + break; + case TEE_IOCTL_OCALL_CMD_INVOKE: + /* -2 here and +2 below to skip the OCALL descriptors */ + msg_num_params = call_ctx->rpc_arg->num_params - 2; + if (num_params < msg_num_params) { + rc = -EINVAL; + goto exit_set_ret; + } + + msg_param = call_ctx->rpc_arg->params + 2; + rc = optee_to_msg_param(msg_param, msg_num_params, params); + if (rc) + goto exit_set_ret; + + process_ocall_memrefs(msg_param, msg_num_params, false); + call_ctx->rpc_must_release = false; + + call_ctx->rpc_arg->params[0].u.value.b = ret; + call_ctx->rpc_arg->params[0].u.value.c = ret_origin; + break; + default: + rc = -EINVAL; + goto exit_set_ret; + } + + call_ctx->rpc_arg->ret = TEEC_SUCCESS; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + + return rc; + +exit_propagate_ret: + call_ctx->rpc_arg->ret = ret; + call_ctx->rpc_arg->ret_origin = ret_origin; + return -EINVAL; +exit_set_ret: + call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + return rc; +} + +static void clear_call_ctx(struct optee_call_ctx *call_ctx) +{ + memset(call_ctx, 0, sizeof(*call_ctx)); } /** - * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world + * optee_do_call_with_ctx() - Invoke OP-TEE in secure world * @ctx: calling context - * @parg: physical address of message to pass to secure world * * Does and SMC to OP-TEE in secure world and handles eventual resulting * Remote Procedure Calls (RPC) from OP-TEE. * - * Returns return code from secure world, 0 is OK + * Returns return code from secure world, 0 is OK, -EAGAIN means an OCALL + * request was received. */ -u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) +u32 optee_do_call_with_ctx(struct optee_call_ctx *call_ctx) { - struct optee *optee = tee_get_drvdata(ctx->teedev); - struct optee_call_waiter w; + struct optee *optee = tee_get_drvdata(call_ctx->ctx->teedev); struct optee_rpc_param param = { }; - struct optee_call_ctx call_ctx = { }; u32 ret; - param.a0 = OPTEE_SMC_CALL_WITH_ARG; - reg_pair_from_64(¶m.a1, ¶m.a2, parg); - /* Initialize waiter */ - optee_cq_wait_init(&optee->call_queue, &w); + if (call_ctx->rpc_shm) { + param.a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; + reg_pair_from_64(¶m.a1, ¶m.a2, + (uintptr_t)call_ctx->rpc_shm); + param.a3 = call_ctx->thread_id; + } else { + param.a0 = OPTEE_SMC_CALL_WITH_ARG; + reg_pair_from_64(¶m.a1, ¶m.a2, call_ctx->msg_parg); + } + while (true) { struct arm_smccc_res res; + optee_bm_timestamp(); + optee->invoke_fn(param.a0, param.a1, param.a2, param.a3, param.a4, param.a5, param.a6, param.a7, &res); + optee_bm_timestamp(); + if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) { /* - * Out of threads in secure world, wait for a thread + * Out of threads in secure world, wait for a thread to * become available. */ - optee_cq_wait_for_completion(&optee->call_queue, &w); + optee_cq_wait_for_completion(&optee->call_queue, + &call_ctx->waiter); } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { - might_sleep(); param.a0 = res.a0; param.a1 = res.a1; param.a2 = res.a2; param.a3 = res.a3; - optee_handle_rpc(ctx, ¶m, &call_ctx); + + if (optee_rpc_is_ocall(¶m, call_ctx)) + return -EAGAIN; + + might_sleep(); + optee_handle_rpc(call_ctx->ctx, ¶m, call_ctx); } else { ret = res.a0; break; } } - optee_rpc_finalize_call(&call_ctx); - /* - * We're done with our thread in secure world, if there's any - * thread waiters wake up one. 
- */ - optee_cq_wait_final(&optee->call_queue, &w); - return ret; } +/** + * optee_do_call_with_arg() - Invoke OP-TEE in secure world + * @ctx: calling context + * @parg: physical address of message to pass to secure world + * + * Wraps a call to optee_do_call_with_ctx that sets up the calling context on + * behalf of a caller that does not expect OCALLs. + * + * Returns return code from secure world, 0 is OK + */ +u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) +{ + struct optee_call_ctx call_ctx = { }; + int rc; + + call_ctx.ctx = ctx; + call_ctx.msg_parg = parg; + + call_prologue(&call_ctx); + + rc = optee_do_call_with_ctx(&call_ctx); + if (rc == -EAGAIN) { + pr_warn("received an unexpected OCALL, cancelling it now"); + call_ctx.rpc_arg->ret = TEEC_ERROR_NOT_SUPPORTED; + call_ctx.rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + optee_do_call_with_ctx(&call_ctx); + } + + call_epilogue(&call_ctx); + + return rc; +} + static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, struct optee_msg_arg **msg_arg, phys_addr_t *msg_parg) @@ -205,88 +521,256 @@ static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params, return shm; } -int optee_open_session(struct tee_context *ctx, - struct tee_ioctl_open_session_arg *arg, - struct tee_param *param) +/* + * Requires @sem in the parent struct optee_session to be held; the caller is + * expected to have filled in the ret and ret_origin elements of rpc_arg. + */ +static int cancel_ocall(struct optee_call_ctx *call_ctx) { - struct optee_context_data *ctxdata = ctx->data; int rc; + + /* +2 and -2 to skip the OCALL descriptors */ + if (call_ctx->rpc_must_release) { + process_ocall_memrefs(call_ctx->rpc_arg->params + 2, + call_ctx->rpc_arg->num_params - 2, false); + call_ctx->rpc_must_release = false; + } + + if (call_ctx->ocall_pages_list) { + optee_free_pages_list(call_ctx->ocall_pages_list, + call_ctx->ocall_num_entries); + call_ctx->ocall_pages_list = NULL; + call_ctx->ocall_num_entries = 0; + } + + rc = optee_do_call_with_ctx(call_ctx); + if (rc == -EAGAIN) + pr_warn("received an OCALL while cancelling an OCALL"); + + call_epilogue(call_ctx); + + return rc; +} + +static int close_session(struct tee_context *ctx, u32 session) +{ struct tee_shm *shm; struct optee_msg_arg *msg_arg; phys_addr_t msg_parg; - struct optee_session *sess = NULL; - /* +2 for the meta parameters added below */ - shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); + shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); if (IS_ERR(shm)) return PTR_ERR(shm); - msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; - msg_arg->cancel_id = arg->cancel_id; + msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; + msg_arg->session = session; + optee_do_call_with_arg(ctx, msg_parg); - /* - * Initialize and add the meta parameters needed when opening a - * session. 
- */ - msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | - OPTEE_MSG_ATTR_META; - msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | - OPTEE_MSG_ATTR_META; - memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); - msg_arg->params[1].u.value.c = arg->clnt_login; - - rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value, - arg->clnt_login, arg->clnt_uuid); - if (rc) - goto out; + tee_shm_free(shm); + return 0; +} - rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); - if (rc) - goto out; +int optee_open_session(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *normal_param, u32 num_normal_params, + struct tee_param *ocall_param) +{ + struct optee_context_data *ctxdata = ctx->data; + struct optee_session *sess = NULL; + struct optee_call_ctx *call_ctx = NULL; + int sess_tmp_id; + u64 ocall_func; + int rc = 0; - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) { - rc = -ENOMEM; - goto out; - } + if (ocall_param && !ctx->cap_ocall) + return -EOPNOTSUPP; - if (optee_do_call_with_arg(ctx, msg_parg)) { - msg_arg->ret = TEEC_ERROR_COMMUNICATION; - msg_arg->ret_origin = TEEC_ORIGIN_COMMS; - } + ocall_func = ocall_param ? param_get_ocall_func(ocall_param) : 0; + if (ocall_func) { + if (arg->session > INT_MAX) + return -EINVAL; - if (msg_arg->ret == TEEC_SUCCESS) { - /* A new session has been created, add it to the list. */ - sess->session_id = msg_arg->session; + sess_tmp_id = (int)arg->session; mutex_lock(&ctxdata->mutex); - list_add(&sess->list_node, &ctxdata->sess_list); + sess = idr_remove(&ctxdata->tmp_sess_list, sess_tmp_id); mutex_unlock(&ctxdata->mutex); + if (!sess) + return -EINVAL; + + call_ctx = &sess->call_ctx; + if (!call_ctx->rpc_shm) { + rc = -EINVAL; + call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + goto exit_cancel; + } + + rc = process_ocall_reply(arg->ret, arg->ret_origin, + normal_param, num_normal_params, + ocall_param, call_ctx); + if (rc) + goto exit_cancel; } else { - kfree(sess); + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) + return -ENOMEM; + + call_ctx = &sess->call_ctx; + + /* +2 for the meta parameters added below */ + call_ctx->msg_shm = get_msg_arg(ctx, num_normal_params + 2, + &call_ctx->msg_arg, + &call_ctx->msg_parg); + if (IS_ERR(call_ctx->msg_shm)) { + rc = PTR_ERR(call_ctx->msg_shm); + goto exit_free; + } + + call_ctx->ctx = ctx; + call_ctx->msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION; + call_ctx->msg_arg->cancel_id = arg->cancel_id; + + /* + * Initialize and add the meta parameters needed when opening a + * session. 
+ */ + call_ctx->msg_arg->params[0].attr = + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META; + call_ctx->msg_arg->params[1].attr = + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META; + memcpy(&call_ctx->msg_arg->params[0].u.value, arg->uuid, + sizeof(arg->uuid)); + call_ctx->msg_arg->params[1].u.value.c = arg->clnt_login; + rc = tee_session_calc_client_uuid((uuid_t *) + &call_ctx->msg_arg->params[1].u.value, + arg->clnt_login, arg->clnt_uuid); + if (rc) + goto exit_free_shm; + + rc = optee_to_msg_param(call_ctx->msg_arg->params + 2, + num_normal_params, normal_param); + if (rc) + goto exit_free_shm; + + call_prologue(call_ctx); } - if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) { - arg->ret = TEEC_ERROR_COMMUNICATION; - arg->ret_origin = TEEC_ORIGIN_COMMS; - /* Close session again to avoid leakage */ - optee_close_session(ctx, msg_arg->session); + rc = optee_do_call_with_ctx(call_ctx); + if (rc == -EAGAIN) { + rc = process_ocall_request(normal_param, num_normal_params, + ocall_param, call_ctx); + if (rc) + goto exit_cancel; + + /* + * 'sess' becomes globally visible after adding it to the IDR, + * so do not touch it once the mutex is unlocked. + */ + mutex_lock(&ctxdata->mutex); + sess_tmp_id = idr_alloc(&ctxdata->tmp_sess_list, sess, 1, 0, + GFP_KERNEL); + if (sess_tmp_id >= 1) + sess->session_id = sess_tmp_id; + mutex_unlock(&ctxdata->mutex); + if (sess_tmp_id < 0) { + rc = sess_tmp_id; + call_ctx->rpc_arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + goto exit_cancel; + } + + arg->session = sess_tmp_id; } else { - arg->session = msg_arg->session; - arg->ret = msg_arg->ret; - arg->ret_origin = msg_arg->ret_origin; + call_epilogue(call_ctx); + + if (rc) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + } else { + arg->ret = call_ctx->msg_arg->ret; + arg->ret_origin = call_ctx->msg_arg->ret_origin; + } + + if (optee_from_msg_param(normal_param, num_normal_params, + call_ctx->msg_arg->params + 2)) { + if (arg->ret == TEEC_SUCCESS) + close_session(ctx, call_ctx->msg_arg->session); + + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (arg->ret) + goto exit_clear_free_all; + + /* + * A new session has been created, initialize it and add it to + * the list. + */ + sema_init(&sess->sem, 1); + arg->session = call_ctx->msg_arg->session; + sess->session_id = call_ctx->msg_arg->session; + + tee_shm_free(call_ctx->msg_shm); + clear_call_ctx(call_ctx); + + mutex_lock(&ctxdata->mutex); + list_add(&sess->list_node, &ctxdata->sess_list); + mutex_unlock(&ctxdata->mutex); + + param_clear_ocall(ocall_param); } -out: - tee_shm_free(shm); return rc; + +exit_cancel: + /* See comment in optee_cancel_open_session_ocall */ + if (cancel_ocall(call_ctx) == 0 && + call_ctx->msg_arg->ret == TEEC_SUCCESS) + close_session(ctx, call_ctx->msg_arg->session); + optee_from_msg_param(normal_param, num_normal_params, + call_ctx->msg_arg->params); +exit_clear_free_all: + param_clear_ocall(ocall_param); +exit_free_shm: + tee_shm_free(call_ctx->msg_shm); +exit_free: + kfree(sess); + return rc; +} + +void optee_cancel_open_session_ocall(struct optee_session *sess) +{ + struct optee_call_ctx *call_ctx = &sess->call_ctx; + + call_ctx->rpc_arg->ret = TEEC_ERROR_TARGET_DEAD; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + + /* + * Reaching this function means an OCALL is pending during session open + * but the CA has terminated abnormally. As such, the OCALL is + * cancelled. 
However, there is a chance that the TA's session open + * handler ignores the cancellation and lets the session open anyway. If + * that happens, close it. + */ + if (cancel_ocall(&sess->call_ctx) == 0 && + call_ctx->msg_arg->ret == TEEC_SUCCESS) + close_session(call_ctx->ctx, call_ctx->msg_arg->session); + + /* + * Decrease the ref count on all shared memory pointers passed into the + * original function invocation. + */ + process_ocall_memrefs(call_ctx->msg_arg->params, + call_ctx->msg_arg->num_params, false); + + tee_shm_free(call_ctx->msg_shm); + kfree(sess); } int optee_close_session(struct tee_context *ctx, u32 session) { struct optee_context_data *ctxdata = ctx->data; - struct tee_shm *shm; - struct optee_msg_arg *msg_arg; - phys_addr_t msg_parg; struct optee_session *sess; /* Check that the session is valid and remove it from the list */ @@ -297,66 +781,162 @@ int optee_close_session(struct tee_context *ctx, u32 session) mutex_unlock(&ctxdata->mutex); if (!sess) return -EINVAL; - kfree(sess); - shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg); - if (IS_ERR(shm)) - return PTR_ERR(shm); + /* + * If another thread found the session before we removed it from the + * list and that thread is operating on the session object itself, wait + * until it is done before we destroy it. + */ + down(&sess->sem); - msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION; - msg_arg->session = session; - optee_do_call_with_arg(ctx, msg_parg); + if (sess->call_ctx.rpc_shm) + optee_cancel_invoke_function_ocall(&sess->call_ctx); + + kfree(sess); + close_session(ctx, session); - tee_shm_free(shm); return 0; } int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, - struct tee_param *param) + struct tee_param *normal_param, u32 num_normal_params, + struct tee_param *ocall_param) { struct optee_context_data *ctxdata = ctx->data; - struct tee_shm *shm; - struct optee_msg_arg *msg_arg; - phys_addr_t msg_parg; + struct optee_call_ctx *call_ctx; struct optee_session *sess; - int rc; + u64 ocall_func; + int rc = 0; + + if (ocall_param && !ctx->cap_ocall) { + rc = -EOPNOTSUPP; + goto exit; + } /* Check that the session is valid */ mutex_lock(&ctxdata->mutex); sess = find_session(ctxdata, arg->session); + if (sess) + down(&sess->sem); mutex_unlock(&ctxdata->mutex); if (!sess) return -EINVAL; - shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg); - if (IS_ERR(shm)) - return PTR_ERR(shm); - msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; - msg_arg->func = arg->func; - msg_arg->session = arg->session; - msg_arg->cancel_id = arg->cancel_id; + call_ctx = &sess->call_ctx; + ocall_func = ocall_param ? param_get_ocall_func(ocall_param) : 0; + if (ocall_func) { + /* The current call is a reply to an OCALL request */ - rc = optee_to_msg_param(msg_arg->params, arg->num_params, param); - if (rc) - goto out; + if (!call_ctx->rpc_shm) { + rc = -EINVAL; + goto exit; + } + + rc = process_ocall_reply(arg->ret, arg->ret_origin, + normal_param, num_normal_params, + ocall_param, call_ctx); + if (rc) + goto exit_cancel; + } else { + /* + * The current call is an invocation that may result in an OCALL + * request. 
+ */ + + if (call_ctx->rpc_shm) { + rc = -EINVAL; + call_ctx->rpc_arg->ret = TEEC_ERROR_BAD_PARAMETERS; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + goto exit_cancel; + } + + call_ctx->msg_shm = get_msg_arg(ctx, num_normal_params, + &call_ctx->msg_arg, + &call_ctx->msg_parg); + if (IS_ERR(call_ctx->msg_shm)) { + rc = PTR_ERR(call_ctx->msg_shm); + goto exit_clear; + } - if (optee_do_call_with_arg(ctx, msg_parg)) { - msg_arg->ret = TEEC_ERROR_COMMUNICATION; - msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + call_ctx->ctx = ctx; + call_ctx->msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND; + call_ctx->msg_arg->func = arg->func; + call_ctx->msg_arg->session = arg->session; + call_ctx->msg_arg->cancel_id = arg->cancel_id; + + rc = optee_to_msg_param(call_ctx->msg_arg->params, + num_normal_params, normal_param); + if (rc) { + tee_shm_free(call_ctx->msg_shm); + goto exit_clear; + } + + call_prologue(call_ctx); } - if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) { - msg_arg->ret = TEEC_ERROR_COMMUNICATION; - msg_arg->ret_origin = TEEC_ORIGIN_COMMS; + rc = optee_do_call_with_ctx(call_ctx); + if (rc == -EAGAIN) { + rc = process_ocall_request(normal_param, num_normal_params, + ocall_param, call_ctx); + if (rc) + goto exit_cancel; + } else { + call_epilogue(call_ctx); + + arg->ret = call_ctx->msg_arg->ret; + arg->ret_origin = call_ctx->msg_arg->ret_origin; + + if (rc) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + if (optee_from_msg_param(normal_param, num_normal_params, + call_ctx->msg_arg->params)) { + arg->ret = TEEC_ERROR_COMMUNICATION; + arg->ret_origin = TEEC_ORIGIN_COMMS; + } + + tee_shm_free(call_ctx->msg_shm); + clear_call_ctx(call_ctx); + param_clear_ocall(ocall_param); } - arg->ret = msg_arg->ret; - arg->ret_origin = msg_arg->ret_origin; -out: - tee_shm_free(shm); + up(&sess->sem); + return rc; + +exit_cancel: + cancel_ocall(call_ctx); + optee_from_msg_param(normal_param, num_normal_params, + call_ctx->msg_arg->params); + tee_shm_free(call_ctx->msg_shm); + param_clear_ocall(ocall_param); +exit_clear: + clear_call_ctx(call_ctx); +exit: + up(&sess->sem); return rc; } +/* Requires @sem in the parent struct optee_session to be held */ +void optee_cancel_invoke_function_ocall(struct optee_call_ctx *call_ctx) +{ + call_ctx->rpc_arg->ret = TEEC_ERROR_TARGET_DEAD; + call_ctx->rpc_arg->ret_origin = TEEC_ORIGIN_COMMS; + + cancel_ocall(call_ctx); + + /* + * Decrease the ref count on all shared memory pointers passed into the + * original function invocation. 
+ */ + process_ocall_memrefs(call_ctx->msg_arg->params, + call_ctx->msg_arg->num_params, false); + + tee_shm_free(call_ctx->msg_shm); + clear_call_ctx(call_ctx); +} + int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session) { struct optee_context_data *ctxdata = ctx->data; @@ -583,6 +1163,9 @@ int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm, phys_addr_t msg_parg; int rc; + if (shm->flags & TEE_SHM_OCALL) + return 0; + if (!num_pages) return -EINVAL; @@ -632,6 +1215,9 @@ int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) phys_addr_t msg_parg; int rc = 0; + if (shm->flags & TEE_SHM_OCALL) + return 0; + shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg); if (IS_ERR(shm_arg)) return PTR_ERR(shm_arg); diff --git a/drivers/tee/optee/call_queue.c b/drivers/tee/optee/call_queue.c new file mode 100644 index 00000000000000..70922c040fc628 --- /dev/null +++ b/drivers/tee/optee/call_queue.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, Linaro Limited + */ + +#include "optee_private.h" + +void optee_cq_wait_init(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're preparing to make a call to secure world. In case we can't + * allocate a thread in secure world we'll end up waiting in + * optee_cq_wait_for_completion(). + * + * Normally if there's no contention in secure world the call will + * complete and we can cleanup directly with optee_cq_wait_final(). + */ + mutex_lock(&cq->mutex); + + /* + * We add ourselves to the queue, but we don't wait. This + * guarantees that we don't lose a completion if secure world + * returns busy and another thread just exited and try to complete + * someone. + */ + init_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +void optee_cq_wait_for_completion(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + wait_for_completion(&w->c); + + mutex_lock(&cq->mutex); + + /* Move to end of list to get out of the way for other waiters */ + list_del(&w->list_node); + reinit_completion(&w->c); + list_add_tail(&w->list_node, &cq->waiters); + + mutex_unlock(&cq->mutex); +} + +void optee_cq_complete_one(struct optee_call_queue *cq) +{ + struct optee_call_waiter *w; + + list_for_each_entry(w, &cq->waiters, list_node) { + if (!completion_done(&w->c)) { + complete(&w->c); + break; + } + } +} + +void optee_cq_wait_final(struct optee_call_queue *cq, + struct optee_call_waiter *w) +{ + /* + * We're done with the call to secure world. The thread in secure + * world that was used for this call is now available for some + * other task to use. + */ + mutex_lock(&cq->mutex); + + /* Get out of the list */ + list_del(&w->list_node); + + /* Wake up one eventual waiting task */ + optee_cq_complete_one(cq); + + /* + * If we're completed we've got a completion from another task that + * was just done with its call to secure world. Since yet another + * thread now is available in secure world wake up another eventual + * waiting task. 
+ */ + if (completion_done(&w->c)) + optee_cq_complete_one(cq); + + mutex_unlock(&cq->mutex); +} diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index b373b1b08b6dee..21f9d570fe8eed 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include "optee_bench.h" #include "optee_private.h" #include "optee_smc.h" #include "shm_pool.h" @@ -216,6 +218,10 @@ static void optee_get_version(struct tee_device *teedev, if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) v.gen_caps |= TEE_GEN_CAP_REG_MEM; + if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL) + v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL; + if (optee->sec_caps & OPTEE_SMC_SEC_CAP_OCALL) + v.gen_caps |= TEE_GEN_CAP_OCALL; *vers = v; } @@ -261,6 +267,10 @@ static int optee_open(struct tee_context *ctx) } mutex_init(&ctxdata->mutex); INIT_LIST_HEAD(&ctxdata->sess_list); + idr_init(&ctxdata->tmp_sess_list); + + ctx->cap_memref_null = optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL; + ctx->cap_ocall = optee->sec_caps & OPTEE_SMC_SEC_CAP_OCALL; ctx->data = ctxdata; return 0; @@ -305,6 +315,7 @@ static void optee_release(struct tee_context *ctx) } kfree(sess); } + idr_destroy(&ctxdata->tmp_sess_list); kfree(ctxdata); if (!IS_ERR(shm)) @@ -321,9 +332,68 @@ static void optee_release(struct tee_context *ctx) } } +static void optee_pre_release(struct tee_context *ctx) +{ + struct optee_context_data *ctxdata = ctx->data; + struct optee_session *sess; + bool have_xa = false; + unsigned long i = 0; + struct xarray xa; + int id; + + if (!ctxdata) + return; + + /* + * Only if necessary, add into 'xa' sessions that have to have an OCALL + * cancelled instead of doing so in the loops to avoid calling into + * secure world with @mutex held. + */ + mutex_lock(&ctxdata->mutex); + idr_for_each_entry(&ctxdata->tmp_sess_list, sess, id) { + if (!have_xa) { + xa_init(&xa); + have_xa = true; + } + idr_remove(&ctxdata->tmp_sess_list, id); + xa_store(&xa, i++, xa_tag_pointer(sess, 1), GFP_KERNEL); + } + list_for_each_entry(sess, &ctxdata->sess_list, list_node) { + if (!sess->call_ctx.rpc_shm) + continue; + if (!have_xa) { + xa_init(&xa); + have_xa = true; + } + xa_store(&xa, i++, sess, GFP_KERNEL); + } + mutex_unlock(&ctxdata->mutex); + + if (!have_xa) + return; + + xa_for_each(&xa, i, sess) { + if (xa_pointer_tag(sess)) { + optee_cancel_open_session_ocall(xa_untag_pointer(sess)); + } else { + /* + * Holding @sem here while calling into secure world is + * fine seeing as there is no code path that would + * recursively acquire it. 
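optee_pre_release() above stores two kinds of sessions in one temporary XArray and tells them apart with the entry's pointer tag: tag 1 for sessions pulled off tmp_sess_list (open-session OCALLs), tag 0 for sessions with a pending invoke-function OCALL. A small self-contained sketch of that tagging idiom (assumes the XArray has already been xa_init()ed):

#include <linux/xarray.h>

static void xa_tag_sketch(struct xarray *xa, void *open_sess, void *invoke_sess)
{
        unsigned long i = 0;
        void *entry;

        xa_store(xa, i++, xa_tag_pointer(open_sess, 1), GFP_KERNEL);
        xa_store(xa, i++, invoke_sess, GFP_KERNEL);     /* tag 0 */

        xa_for_each(xa, i, entry) {
                if (xa_pointer_tag(entry))
                        pr_info("open-session OCALL: %p\n",
                                xa_untag_pointer(entry));
                else
                        pr_info("invoke-function OCALL: %p\n", entry);
        }
}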
+ */ + down(&sess->sem); + optee_cancel_invoke_function_ocall(&sess->call_ctx); + up(&sess->sem); + } + } + + xa_destroy(&xa); +} + static const struct tee_driver_ops optee_ops = { .get_version = optee_get_version, .open = optee_open, + .pre_release = optee_pre_release, .release = optee_release, .open_session = optee_open_session, .close_session = optee_close_session, @@ -602,6 +672,7 @@ static int optee_remove(struct platform_device *pdev) kfree(optee); + optee_bm_disable(); return 0; } @@ -703,6 +774,7 @@ static int optee_probe(struct platform_device *pdev) } pr_info("initialized driver\n"); + optee_bm_enable(); return 0; err: if (optee) { diff --git a/drivers/tee/optee/optee_bench.h b/drivers/tee/optee/optee_bench.h new file mode 100644 index 00000000000000..985e6a011f58f7 --- /dev/null +++ b/drivers/tee/optee/optee_bench.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _OPTEE_BENCH_H +#define _OPTEE_BENCH_H + +#include + +/* + * Cycle count divider is enabled (in PMCR), + * CCNT value is incremented every 64th clock cycle + */ +#define OPTEE_BENCH_DIVIDER 64 + +/* max amount of timestamps */ +#define OPTEE_BENCH_MAX_STAMPS 32 +#define OPTEE_BENCH_MAX_MASK (OPTEE_BENCH_MAX_STAMPS - 1) + +/* OP-TEE susbsystems ids */ +#define OPTEE_BENCH_KMOD 0x20000000 + +#define OPTEE_MSG_RPC_CMD_BENCH_REG_NEW 0 +#define OPTEE_MSG_RPC_CMD_BENCH_REG_DEL 1 + +/* storing timestamp */ +struct optee_time_st { + uint64_t cnt; /* stores value from CNTPCT register */ + uint64_t addr; /* stores value from program counter register */ + uint64_t src; /* OP-TEE subsystem id */ +}; + +/* per-cpu circular buffer for timestamps */ +struct optee_ts_cpu_buf { + uint64_t head; + uint64_t tail; + struct optee_time_st stamps[OPTEE_BENCH_MAX_STAMPS]; +}; + +/* memory layout for shared memory, where timestamps will be stored */ +struct optee_ts_global { + uint64_t cores; + struct optee_ts_cpu_buf cpu_buf[]; +}; + +extern struct optee_ts_global *optee_bench_ts_global; +extern struct rw_semaphore optee_bench_ts_rwsem; + +#ifdef CONFIG_OPTEE_BENCHMARK +void optee_bm_enable(void); +void optee_bm_disable(void); +void optee_bm_timestamp(void); +#else +static inline void optee_bm_enable(void) {} +static inline void optee_bm_disable(void) {} +static inline void optee_bm_timestamp(void) {} +#endif /* CONFIG_OPTEE_BENCHMARK */ +#endif /* _OPTEE_BENCH_H */ diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h index 795bc19ae17a6d..4c2aae8f932fe7 100644 --- a/drivers/tee/optee/optee_msg.h +++ b/drivers/tee/optee/optee_msg.h @@ -404,10 +404,13 @@ struct optee_msg_arg { */ #define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6 /* Memory that can be shared with a non-secure user space application */ -#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 +#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0 /* Memory only shared with non-secure kernel */ -#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 - +#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1 +/* Memory shared with non-secure kernel, but exported to userspace */ +#define OPTEE_MSG_RPC_SHM_TYPE_GLOBAL 2 +/* Memory shared with the 
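optee_bench.h above only declares the shared layout; the producer that fills it lives in the benchmark code and is not part of this hunk. One plausible way such a producer could append a stamp, using the head index masked with OPTEE_BENCH_MAX_MASK so the per-CPU buffer wraps in place (the function name and counter sourcing are assumptions, and the caller would need preemption disabled around smp_processor_id()):

static void bench_stamp_sketch(struct optee_ts_global *ts, u64 cnt, u64 pc)
{
        struct optee_ts_cpu_buf *buf;
        u64 slot;

        if (!ts)
                return;

        buf = &ts->cpu_buf[smp_processor_id()];
        slot = buf->head & OPTEE_BENCH_MAX_MASK;   /* wrap at 32 entries */

        buf->stamps[slot].cnt = cnt;               /* cycle counter value */
        buf->stamps[slot].addr = pc;               /* program counter */
        buf->stamps[slot].src = OPTEE_BENCH_KMOD;  /* subsystem id */
        buf->head++;
}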
requesting TA's Client Application */ +#define OPTEE_MSG_RPC_SHM_TYPE_CLIENT_APPL 3 /* * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC * @@ -419,4 +422,26 @@ struct optee_msg_arg { */ #define OPTEE_MSG_RPC_CMD_SHM_FREE 7 +/* + * Register timestamp buffer + * + * [in] param[0].u.value.a Subcommand (register buffer, unregister buffer) + * [in] param[0].u.value.b Physical address of timestamp buffer + * [in] param[0].u.value.c Size of buffer + */ +#define OPTEE_MSG_RPC_CMD_BENCH_REG 20 + +/* + * Send a command to the Client Application. + * + * [in] param[0].u.value[0].a command Id + * [out] param[0].u.value[0].b OCALL return value + * [out] param[0].u.value[0].c OCALL return value origin + * [in] param[0].u.value[1].a UUID of TA whence OCALL originated (Hi) + * [out] param[0].u.value[1].b UUID of TA whence OCALL originated (Lo) + * + * [in/out] any[2..5].* OCALL parameters as specified by the TA, if any + */ +#define OPTEE_MSG_RPC_CMD_OCALL 22 + #endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 8b71839a357ede..7e5679c74edbe5 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -16,10 +16,13 @@ /* Some Global Platform error codes used in this driver */ #define TEEC_SUCCESS 0x00000000 +#define TEEC_ERROR_CANCEL 0xFFFF0002 #define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A #define TEEC_ERROR_COMMUNICATION 0xFFFF000E #define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C #define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010 +#define TEEC_ERROR_TARGET_DEAD 0xFFFF3024 #define TEEC_ORIGIN_COMMS 0x00000002 @@ -97,15 +100,69 @@ struct optee { struct work_struct scan_bus_work; }; +struct optee_call_waiter { + struct list_head list_node; + struct completion c; +}; + +/** + * struct optee_call_ctx - holds context that is preserved during one STD call + * @pages_list: list of pages allocated for RPC requests + * @num_entries: number of pages in 'pages_list' + * @ctx: TEE context whence the OCALL originated, if any + * @msg_shm: shared memory object used for calling into OP-TEE + * @msg_arg: arguments used for calling into OP-TEE, namely the data + * behind 'msg_shm' + * @msg_parg: physical pointer underlying 'msg_shm' + * @rpc_must_release: indicates that OCALL parameters have had their refcount + * increased and must be decreased on cancellation + * @rpc_shm: shared memory object used for responding to RPCs + * @rpc_arg: arguments used for responding to RPCs, namely the data + * behind 'rpc_shm' + * @thread_id: secure thread Id whence the OCALL originated and which + * must be resumed when replying to the OCALL + * @waiter: object used to wait until a secure thread becomes + * available is the previous call into OP-TEE failed + * because all secure threads are in use + * @ocall_pages_list: list of pages allocated for OCALL requests + * @ocall_num_entries: number of pages in 'ocall_pages_list' + */ +struct optee_call_ctx { + /* Information about pages list used in last allocation */ + void *pages_list; + size_t num_entries; + + /* OCALL support */ + struct tee_context *ctx; + + struct tee_shm *msg_shm; + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + + bool rpc_must_release; + struct tee_shm *rpc_shm; + struct optee_msg_arg *rpc_arg; + + u32 thread_id; + struct optee_call_waiter waiter; + + void *ocall_pages_list; + size_t ocall_num_entries; +}; + struct optee_session { + /* Serializes access to this struct */ + struct semaphore sem; struct list_head 
list_node; u32 session_id; + struct optee_call_ctx call_ctx; }; struct optee_context_data { /* Serializes access to this struct */ struct mutex mutex; struct list_head sess_list; + struct idr tmp_sess_list; }; struct optee_rpc_param { @@ -119,20 +176,41 @@ struct optee_rpc_param { u32 a7; }; -/* Holds context that is preserved during one STD call */ -struct optee_call_ctx { - /* information about pages list used in last allocation */ - void *pages_list; - size_t num_entries; -}; +/* + * RPC support + */ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, struct optee_call_ctx *call_ctx); +bool optee_rpc_is_ocall(struct optee_rpc_param *param, + struct optee_call_ctx *call_ctx); +int optee_rpc_process_shm_alloc(struct tee_shm *shm, + struct optee_msg_param *msg_param, void **list); void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx); +/* + * Wait queue + */ + void optee_wait_queue_init(struct optee_wait_queue *wq); void optee_wait_queue_exit(struct optee_wait_queue *wq); +/* + * Call queue + */ + +void optee_cq_wait_init(struct optee_call_queue *cq, + struct optee_call_waiter *w); +void optee_cq_wait_for_completion(struct optee_call_queue *cq, + struct optee_call_waiter *w); +void optee_cq_complete_one(struct optee_call_queue *cq); +void optee_cq_wait_final(struct optee_call_queue *cq, + struct optee_call_waiter *w); + +/* + * Supplicant + */ + u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, struct tee_param *param); @@ -147,15 +225,40 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, struct tee_param *param); +/* + * Calls into OP-TEE + */ + u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg); + +/* + * Sessions + */ + int optee_open_session(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, - struct tee_param *param); + struct tee_param *normal_param, u32 num_normal_params, + struct tee_param *ocall_param); int optee_close_session(struct tee_context *ctx, u32 session); + +/* + * Function invocations + */ + int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, - struct tee_param *param); + struct tee_param *normal_param, u32 num_normal_params, + struct tee_param *ocall_param); + +/* + * Cancellations + */ + int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); +/* + * Shared memory + */ + void optee_enable_shm_cache(struct optee *optee); void optee_disable_shm_cache(struct optee *optee); @@ -169,20 +272,39 @@ int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm, unsigned long start); int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm); +/* + * Paremeters + */ + int optee_from_msg_param(struct tee_param *params, size_t num_params, const struct optee_msg_param *msg_params); int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params, const struct tee_param *params); +/* + * RPC memory + */ + u64 *optee_allocate_pages_list(size_t num_entries); void optee_free_pages_list(void *array, size_t num_entries); void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages, size_t page_offset); +/* + * Devices + */ + #define PTA_CMD_GET_DEVICES 0x0 #define PTA_CMD_GET_DEVICES_SUPP 0x1 int optee_enumerate_devices(u32 func); +/* + * OCALLs + */ + +void optee_cancel_open_session_ocall(struct optee_session *sess); +void optee_cancel_invoke_function_ocall(struct optee_call_ctx *call_ctx); + /* * 
Small helpers */ diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index c72122d9c99724..689c05543bd2c1 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -215,6 +215,12 @@ struct optee_smc_get_shm_config_result { */ #define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2) +/* Secure world supports Shared Memory with a NULL buffer reference */ +#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4) + +/* Secure world is built with OCALL support */ +#define OPTEE_SMC_SEC_CAP_OCALL BIT(5) + #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES) diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index b4ade54d1f280a..37c5a74607e099 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -7,8 +7,10 @@ #include #include +#include #include #include +#include "optee_bench.h" #include "optee_private.h" #include "optee_smc.h" @@ -190,14 +192,67 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) return shm; } +int optee_rpc_process_shm_alloc(struct tee_shm *shm, + struct optee_msg_param *msg_param, void **list) +{ + phys_addr_t pa; + size_t sz; + size_t offs; + struct page **pages; + size_t page_num; + void *pages_list; + + if (tee_shm_get_pa(shm, 0, &pa)) + return -EINVAL; + + sz = tee_shm_get_size(shm); + + if (tee_shm_is_registered(shm)) { + pages = tee_shm_get_pages(shm, &page_num); + if (!pages || !page_num) + return -EINVAL; + + offs = tee_shm_get_page_offset(shm); + if (offs >= OPTEE_MSG_NONCONTIG_PAGE_SIZE) + return -EINVAL; + + pages_list = optee_allocate_pages_list(page_num); + if (!pages_list) + return -ENOMEM; + + /* + * In the least bits of u.tmem.buf_ptr we store buffer offset + * from 4k page, as described in OP-TEE ABI. 
+ */ + + msg_param->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | + OPTEE_MSG_ATTR_NONCONTIG; + msg_param->u.tmem.buf_ptr = virt_to_phys(pages_list) | offs; + msg_param->u.tmem.size = sz; + msg_param->u.tmem.shm_ref = (unsigned long)shm; + optee_fill_pages_list(pages_list, pages, page_num, + tee_shm_get_page_offset(shm)); + *list = pages_list; + } else { + msg_param->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; + msg_param->u.tmem.buf_ptr = pa; + msg_param->u.tmem.size = sz; + msg_param->u.tmem.shm_ref = (unsigned long)shm; + *list = NULL; + } + + return 0; +} + static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { - phys_addr_t pa; struct tee_shm *shm; + void *pages_list; size_t sz; size_t n; + int rc; arg->ret_origin = TEEC_ORIGIN_COMMS; @@ -222,6 +277,9 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED); break; + case OPTEE_MSG_RPC_SHM_TYPE_GLOBAL: + shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; return; @@ -232,52 +290,18 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, return; } - if (tee_shm_get_pa(shm, 0, &pa)) { + rc = optee_rpc_process_shm_alloc(shm, arg->params, &pages_list); + if (rc == -ENOMEM) { + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + goto bad; + } else if (rc) { arg->ret = TEEC_ERROR_BAD_PARAMETERS; goto bad; } - sz = tee_shm_get_size(shm); - if (tee_shm_is_registered(shm)) { - struct page **pages; - u64 *pages_list; - size_t page_num; - - pages = tee_shm_get_pages(shm, &page_num); - if (!pages || !page_num) { - arg->ret = TEEC_ERROR_OUT_OF_MEMORY; - goto bad; - } - - pages_list = optee_allocate_pages_list(page_num); - if (!pages_list) { - arg->ret = TEEC_ERROR_OUT_OF_MEMORY; - goto bad; - } - call_ctx->pages_list = pages_list; - call_ctx->num_entries = page_num; - - arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | - OPTEE_MSG_ATTR_NONCONTIG; - /* - * In the least bits of u.tmem.buf_ptr we store buffer offset - * from 4k page, as described in OP-TEE ABI. 
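For registered (non-contiguous) buffers, optee_rpc_process_shm_alloc() packs the physical address of the kernel-built page list and the data offset within the first 4 KiB page into the single u.tmem.buf_ptr field, which is why the offset is rejected earlier unless it is below OPTEE_MSG_NONCONTIG_PAGE_SIZE. A sketch of the encode/decode convention, assuming the page-list allocation is page-aligned so its low bits are free to carry the offset:

#define NONCONTIG_OFFS_MASK (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)

static u64 encode_buf_ptr(phys_addr_t pages_list_pa, size_t offs)
{
        /* offs was already checked to be < OPTEE_MSG_NONCONTIG_PAGE_SIZE */
        return (u64)pages_list_pa | (offs & NONCONTIG_OFFS_MASK);
}

static void decode_buf_ptr(u64 buf_ptr, phys_addr_t *pages_list_pa,
                           size_t *offs)
{
        *offs = buf_ptr & NONCONTIG_OFFS_MASK;
        *pages_list_pa = buf_ptr & ~(u64)NONCONTIG_OFFS_MASK;
}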
- */ - arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) | - (tee_shm_get_page_offset(shm) & - (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); - arg->params[0].u.tmem.size = tee_shm_get_size(shm); - arg->params[0].u.tmem.shm_ref = (unsigned long)shm; - - optee_fill_pages_list(pages_list, pages, page_num, - tee_shm_get_page_offset(shm)); - } else { - arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; - arg->params[0].u.tmem.buf_ptr = pa; - arg->params[0].u.tmem.size = sz; - arg->params[0].u.tmem.shm_ref = (unsigned long)shm; + call_ctx->num_entries = shm->num_pages; } arg->ret = TEEC_SUCCESS; @@ -330,6 +354,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx, cmd_free_suppl(ctx, shm); break; case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: + case OPTEE_MSG_RPC_SHM_TYPE_GLOBAL: tee_shm_free(shm); break; default: @@ -353,6 +378,50 @@ void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx) free_pages_list(call_ctx); } +static void handle_rpc_func_cmd_bm_reg(struct optee_msg_arg *arg) +{ + u64 size; + u64 type; + u64 paddr; + + if (arg->num_params != 1) + goto bad; + + if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) != + OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) + goto bad; + + type = arg->params[0].u.value.a; + switch (type) { + case OPTEE_MSG_RPC_CMD_BENCH_REG_NEW: + size = arg->params[0].u.value.c; + paddr = arg->params[0].u.value.b; + down_write(&optee_bench_ts_rwsem); + optee_bench_ts_global = + memremap(paddr, size, MEMREMAP_WB); + if (!optee_bench_ts_global) { + up_write(&optee_bench_ts_rwsem); + goto bad; + } + up_write(&optee_bench_ts_rwsem); + break; + case OPTEE_MSG_RPC_CMD_BENCH_REG_DEL: + down_write(&optee_bench_ts_rwsem); + if (optee_bench_ts_global) + memunmap(optee_bench_ts_global); + optee_bench_ts_global = NULL; + up_write(&optee_bench_ts_rwsem); + break; + default: + goto bad; + } + + arg->ret = TEEC_SUCCESS; + return; +bad: + arg->ret = TEEC_ERROR_BAD_PARAMETERS; +} + static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, struct tee_shm *shm, struct optee_call_ctx *call_ctx) @@ -382,6 +451,9 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, case OPTEE_MSG_RPC_CMD_SHM_FREE: handle_rpc_func_cmd_shm_free(ctx, arg); break; + case OPTEE_MSG_RPC_CMD_BENCH_REG: + handle_rpc_func_cmd_bm_reg(arg); + break; default: handle_rpc_supp_cmd(ctx, arg); } @@ -441,3 +513,38 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC; } + +bool optee_rpc_is_ocall(struct optee_rpc_param *param, + struct optee_call_ctx *call_ctx) +{ + u32 func; + u64 shm_type; + + struct tee_shm *shm; + struct optee_msg_arg *arg; + + func = OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0); + if (func != OPTEE_SMC_RPC_FUNC_CMD) + return false; + + shm = reg_pair_to_ptr(param->a1, param->a2); + arg = tee_shm_get_va(shm, 0); + + switch (arg->cmd) { + case OPTEE_MSG_RPC_CMD_SHM_ALLOC: + case OPTEE_MSG_RPC_CMD_SHM_FREE: + shm_type = arg->params[0].u.value.a; + if (shm_type != OPTEE_MSG_RPC_SHM_TYPE_CLIENT_APPL) + break; + fallthrough; + case OPTEE_MSG_RPC_CMD_OCALL: + call_ctx->rpc_shm = shm; + call_ctx->rpc_arg = arg; + call_ctx->thread_id = param->a3; + return true; + default: + break; + } + + return false; +} diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 64637e09a09536..42b59375974af7 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -98,6 +98,8 @@ void teedev_ctx_put(struct tee_context *ctx) static void teedev_close_context(struct tee_context *ctx) { + if 
(ctx->teedev->desc->ops->pre_release) + ctx->teedev->desc->ops->pre_release(ctx); tee_device_put(ctx->teedev); teedev_ctx_put(ctx); } @@ -200,7 +202,8 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, int name_len; int rc; - if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) { + if (connection_method == TEE_IOCTL_LOGIN_PUBLIC || + connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) { /* Nil UUID to be passed to TEE environment */ uuid_copy(uuid, &uuid_null); return 0; @@ -319,18 +322,20 @@ tee_ioctl_shm_register(struct tee_context *ctx, struct tee_ioctl_shm_register_data __user *udata) { long ret; + u32 flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED; struct tee_ioctl_shm_register_data data; struct tee_shm *shm; if (copy_from_user(&data, udata, sizeof(data))) return -EFAULT; - /* Currently no input flags are supported */ - if (data.flags) + if (data.flags & ~TEE_IOCTL_SHM_OCALL) return -EINVAL; - shm = tee_shm_register(ctx, data.addr, data.length, - TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED); + if (data.flags & TEE_IOCTL_SHM_OCALL) + flags |= TEE_SHM_OCALL; + + shm = tee_shm_register(ctx, data.addr, data.length, flags); if (IS_ERR(shm)) return PTR_ERR(shm); @@ -351,9 +356,117 @@ tee_ioctl_shm_register(struct tee_context *ctx, return ret; } -static int params_from_user(struct tee_context *ctx, struct tee_param *params, - size_t num_params, - struct tee_ioctl_param __user *uparams) +static int tee_ioctl_shm_register_fd(struct tee_context *ctx, + struct tee_ioctl_shm_register_fd_data __user *udata) +{ + struct tee_ioctl_shm_register_fd_data data; + struct tee_shm *shm; + long ret; + + if (copy_from_user(&data, udata, sizeof(data))) + return -EFAULT; + + /* Currently no input flags are supported */ + if (data.flags) + return -EINVAL; + + shm = tee_shm_register_fd(ctx, data.fd); + if (IS_ERR_OR_NULL(shm)) + return -EINVAL; + + data.id = shm->id; + data.flags = shm->flags; + data.size = shm->size; + + if (copy_to_user(udata, &data, sizeof(data))) + ret = -EFAULT; + else + ret = tee_shm_get_fd(shm); + + /* + * When user space closes the file descriptor the shared memory + * should be freed or if tee_shm_get_fd() failed then it will + * be freed immediately. + */ + tee_shm_put(shm); + return ret; +} + +static bool param_is_ocall_reply(struct tee_ioctl_param *param) +{ + u64 type = param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; + + return param->attr & TEE_IOCTL_PARAM_ATTR_OCALL && + type == TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT && + param->a; +} + +static bool param_is_ocall_request(struct tee_param *param) +{ + u64 type = param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; + + return param->attr & TEE_IOCTL_PARAM_ATTR_OCALL && + type == TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT && + param->u.value.a; +} + +static bool param_is_ocall_request_safe(struct tee_param *param) +{ + return param ? param_is_ocall_request(param) : false; +} + +static bool param_is_ocall(struct tee_param *param) +{ + u64 type = param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; + + return param->attr & TEE_IOCTL_PARAM_ATTR_OCALL && + type == TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT; +} + +static struct tee_shm *shm_from_user(struct tee_context *ctx, + struct tee_ioctl_param *ip) +{ + struct tee_shm *shm = ERR_PTR(-EINVAL); + + /* + * If a NULL pointer is passed to a TA in the TEE, + * the ip->c IOCTL parameters is set to TEE_MEMREF_NULL + * indicating a NULL memory reference. 
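With TEE_IOCTL_SHM_OCALL accepted above, userspace can register a buffer that is earmarked for OCALL use (the OP-TEE driver then skips the usual secure-world registration for it, as the TEE_SHM_OCALL checks earlier in this series show). A hedged userspace sketch of the ioctl usage, error handling trimmed:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

static int register_ocall_shm(int tee_fd, void *buf, size_t len, int *shm_id)
{
        struct tee_ioctl_shm_register_data data;
        int shm_fd;

        memset(&data, 0, sizeof(data));
        data.addr = (uintptr_t)buf;
        data.length = len;
        data.flags = TEE_IOCTL_SHM_OCALL;   /* new flag from this series */

        shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
        if (shm_fd < 0)
                return -1;

        *shm_id = data.id;   /* identifier to use in memref parameters */
        return shm_fd;       /* dma-buf fd backing the registration */
}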
+ */ + if (ip->c != TEE_MEMREF_NULL) { + /* + * If we fail to get a pointer to a shared + * memory object (and increase the ref count) + * from an identifier we return an error. All + * pointers that has been added in params have + * an increased ref count. It's the callers + * responibility to do tee_shm_put() on all + * resolved pointers. + */ + shm = tee_shm_get_from_id(ctx, ip->c); + if (IS_ERR(shm)) + return shm; + + /* + * Ensure offset + size does not overflow + * offset and does not overflow the size of + * the referred shared memory object. + */ + if ((ip->a + ip->b) < ip->a || (ip->a + ip->b) > shm->size) { + tee_shm_put(shm); + return ERR_PTR(-EINVAL); + } + } else if (ctx->cap_memref_null) { + /* Pass NULL pointer to OP-TEE */ + shm = NULL; + } + + return shm; +} + +static int params_from_user_normal(struct tee_context *ctx, + struct tee_param *params, size_t num_params, + struct tee_ioctl_param __user *uparams) { size_t n; @@ -382,28 +495,63 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - /* - * If we fail to get a pointer to a shared memory - * object (and increase the ref count) from an - * identifier we return an error. All pointers that - * has been added in params have an increased ref - * count. It's the callers responibility to do - * tee_shm_put() on all resolved pointers. - */ - shm = tee_shm_get_from_id(ctx, ip.c); + shm = shm_from_user(ctx, &ip); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + params[n].u.memref.shm_offs = ip.a; + params[n].u.memref.size = ip.b; + params[n].u.memref.shm = shm; + break; + default: + /* Unknown attribute */ + return -EINVAL; + } + } + return 0; +} + +static int params_from_user_ocall(struct tee_context *ctx, + struct tee_param *params, size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_shm *shm; + struct tee_ioctl_param ip; + + if (copy_from_user(&ip, uparams + n, sizeof(ip))) + return -EFAULT; + + /* All unused attribute bits has to be zero */ + if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK) + return -EINVAL; + + params[n].attr = ip.attr; + switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + params[n].u.value.a = ip.a; + params[n].u.value.b = ip.b; + params[n].u.value.c = ip.c; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + shm = shm_from_user(ctx, &ip); if (IS_ERR(shm)) return PTR_ERR(shm); /* - * Ensure offset + size does not overflow offset - * and does not overflow the size of the referred - * shared memory object. + * Reference counting for OCALL memref parameters is + * handled by the TEE-specific driver as necessary. 
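shm_from_user() above is the kernel side of the NULL-memref feature: a memref whose @c field is TEE_MEMREF_NULL is forwarded to the TA as a NULL buffer, but only when the context advertised cap_memref_null. A sketch of how a client builds such a parameter (it should first confirm TEE_GEN_CAP_MEMREF_NULL via TEE_IOC_VERSION):

#include <string.h>
#include <linux/tee.h>

static void set_null_memref(struct tee_ioctl_param *p)
{
        memset(p, 0, sizeof(*p));
        p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
        p->a = 0;                  /* offset: unused for a NULL reference */
        p->b = 0;                  /* size: zero-length buffer */
        p->c = TEE_MEMREF_NULL;    /* no backing shared-memory id */
}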
*/ - if ((ip.a + ip.b) < ip.a || - (ip.a + ip.b) > shm->size) { + if (shm) tee_shm_put(shm); - return -EINVAL; - } params[n].u.memref.shm_offs = ip.a; params[n].u.memref.size = ip.b; @@ -417,8 +565,25 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, return 0; } -static int params_to_user(struct tee_ioctl_param __user *uparams, - size_t num_params, struct tee_param *params) +static int params_from_user(struct tee_context *ctx, struct tee_param *params, + size_t num_params, + struct tee_ioctl_param __user *uparams) +{ + struct tee_ioctl_param ip; + + if (!num_params) + return 0; + + if (copy_from_user(&ip, uparams, sizeof(ip))) + return -EFAULT; + + return param_is_ocall_reply(&ip) + ? params_from_user_ocall(ctx, params, num_params, uparams) + : params_from_user_normal(ctx, params, num_params, uparams); +} + +static int params_to_user_normal(struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) { size_t n; @@ -426,7 +591,7 @@ static int params_to_user(struct tee_ioctl_param __user *uparams, struct tee_ioctl_param __user *up = uparams + n; struct tee_param *p = params + n; - switch (p->attr) { + switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: if (put_user(p->u.value.a, &up->a) || @@ -445,6 +610,87 @@ static int params_to_user(struct tee_ioctl_param __user *uparams, return 0; } +static int params_to_user_ocall(struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + struct tee_ioctl_param __user *up = uparams + n; + struct tee_param *p = params + n; + + if (put_user(p->attr, &up->attr)) + return -EFAULT; + + switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: + if (put_user(p->u.value.a, &up->a) || + put_user(p->u.value.b, &up->b) || + put_user(p->u.value.c, &up->c)) + return -EFAULT; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + if (p->u.memref.shm) { + if ((put_user((u64)p->u.memref.shm_offs, + &up->a) || + put_user((u64)p->u.memref.size, &up->b) || + put_user(p->u.memref.shm->id, &up->c))) + return -EFAULT; + } else { + if (put_user(0, &up->a) || + put_user(0, &up->b) || + put_user(TEE_MEMREF_NULL, &up->c)) + return -EFAULT; + } + break; + default: + break; + } + } + return 0; +} + +static int params_to_user(struct tee_ioctl_param __user *uparams, + size_t num_params, struct tee_param *params) +{ + if (!num_params) + return 0; + + return param_is_ocall_request(params) + ? 
params_to_user_ocall(uparams, num_params, params) + : params_to_user_normal(uparams, num_params, params); +} + +static inline int find_ocall_param(struct tee_param *params, u32 num_params, + struct tee_param **normal_params, + u32 *num_normal_params, + struct tee_param **ocall_param) +{ + size_t n; + + for (n = 0; n < num_params; n++) { + if (param_is_ocall(params + n)) { + if (n == 0) { + *normal_params = params + 1; + *num_normal_params = num_params - 1; + *ocall_param = params; + return 0; + } else { + return -EINVAL; + } + } + } + + *normal_params = params; + *num_normal_params = num_params; + *ocall_param = NULL; + + return 0; +} + static int tee_ioctl_open_session(struct tee_context *ctx, struct tee_ioctl_buf_data __user *ubuf) { @@ -455,6 +701,9 @@ static int tee_ioctl_open_session(struct tee_context *ctx, struct tee_ioctl_open_session_arg arg; struct tee_ioctl_param __user *uparams = NULL; struct tee_param *params = NULL; + struct tee_param *ocall_param = NULL; + struct tee_param *normal_params = NULL; + u32 num_normal_params = 0; bool have_session = false; if (!ctx->teedev->desc->ops->open_session) @@ -483,6 +732,10 @@ static int tee_ioctl_open_session(struct tee_context *ctx, rc = params_from_user(ctx, params, arg.num_params, uparams); if (rc) goto out; + rc = find_ocall_param(params, arg.num_params, &normal_params, + &num_normal_params, &ocall_param); + if (rc) + goto out; } if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN && @@ -492,7 +745,9 @@ static int tee_ioctl_open_session(struct tee_context *ctx, goto out; } - rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params); + rc = ctx->teedev->desc->ops->open_session(ctx, &arg, normal_params, + num_normal_params, + ocall_param); if (rc) goto out; have_session = true; @@ -514,10 +769,11 @@ static int tee_ioctl_open_session(struct tee_context *ctx, if (params) { /* Decrease ref count for all valid shared memory pointers */ - for (n = 0; n < arg.num_params; n++) - if (tee_param_is_memref(params + n) && - params[n].u.memref.shm) - tee_shm_put(params[n].u.memref.shm); + if (!param_is_ocall_request_safe(ocall_param)) + for (n = 0; n < arg.num_params; n++) + if (tee_param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); kfree(params); } @@ -534,6 +790,9 @@ static int tee_ioctl_invoke(struct tee_context *ctx, struct tee_ioctl_invoke_arg arg; struct tee_ioctl_param __user *uparams = NULL; struct tee_param *params = NULL; + struct tee_param *ocall_param = NULL; + struct tee_param *normal_params = NULL; + u32 num_normal_params = 0; if (!ctx->teedev->desc->ops->invoke_func) return -EINVAL; @@ -561,9 +820,23 @@ static int tee_ioctl_invoke(struct tee_context *ctx, rc = params_from_user(ctx, params, arg.num_params, uparams); if (rc) goto out; + + /* + * The OCALL parameter must be first so that we know how to + * process the remainder of the parameters, or not be present at + * all. This function returns an error if the OCALL parameter is + * found in the wrong place. If it is not found, 'ocall_param' + * remains NULL. 
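find_ocall_param() above encodes the calling convention: if an OCALL control parameter is supplied at all it must sit in slot 0, and everything after it is an ordinary parameter for the TA. A kernel-client-flavoured sketch of an argument array laid out that way (values are arbitrary and the shm is assumed to be already registered):

static void build_ocall_params_sketch(struct tee_param *params,
                                      struct tee_shm *shm, size_t len)
{
        memset(params, 0, 3 * sizeof(*params));

        /* Slot 0: OCALL control parameter; its value fields are managed
         * by the driver across the OCALL request/reply round trip. */
        params[0].attr = TEE_IOCTL_PARAM_ATTR_OCALL |
                         TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;

        /* Slots 1..2: the "normal" parameters the TA actually sees */
        params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
        params[1].u.value.a = 42;

        params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
        params[2].u.memref.shm = shm;
        params[2].u.memref.size = len;
}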
+ */ + rc = find_ocall_param(params, arg.num_params, &normal_params, + &num_normal_params, &ocall_param); + if (rc) + goto out; } - rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params); + rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, normal_params, + num_normal_params, + ocall_param); if (rc) goto out; @@ -575,11 +848,22 @@ static int tee_ioctl_invoke(struct tee_context *ctx, rc = params_to_user(uparams, arg.num_params, params); out: if (params) { - /* Decrease ref count for all valid shared memory pointers */ - for (n = 0; n < arg.num_params; n++) - if (tee_param_is_memref(params + n) && - params[n].u.memref.shm) - tee_shm_put(params[n].u.memref.shm); + /* + * Decrease the ref count for all valid shared memory pointers + * if this is a normal return. If returning with an OCALL + * request, the parameters should have been overwritten with + * those of the OCALL. The original parameters, and thus the + * memrefs carrying the SHMs whose ref count was increased on + * entry, shall be restored once the full OCALL sequence is + * finished. When that happens, we decrease the ref count on + * them. Otherwise, we leave the SHMs be; the TEE-specific + * driver should have dealt with their ref counts already. + */ + if (!param_is_ocall_request_safe(ocall_param)) + for (n = 0; n < arg.num_params; n++) + if (tee_param_is_memref(params + n) && + params[n].u.memref.shm) + tee_shm_put(params[n].u.memref.shm); kfree(params); } return rc; @@ -811,6 +1095,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return tee_ioctl_shm_alloc(ctx, uarg); case TEE_IOC_SHM_REGISTER: return tee_ioctl_shm_register(ctx, uarg); + case TEE_IOC_SHM_REGISTER_FD: + return tee_ioctl_shm_register_fd(ctx, uarg); case TEE_IOC_OPEN_SESSION: return tee_ioctl_open_session(ctx, uarg); case TEE_IOC_INVOKE: @@ -1166,9 +1452,22 @@ int tee_client_open_session(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, struct tee_param *param) { + struct tee_param *ocall_param = NULL; + struct tee_param *normal_params = NULL; + u32 num_normal_params = 0; + int rc; + if (!ctx->teedev->desc->ops->open_session) return -EINVAL; - return ctx->teedev->desc->ops->open_session(ctx, arg, param); + + rc = find_ocall_param(param, arg->num_params, &normal_params, + &num_normal_params, &ocall_param); + if (rc) + return rc; + + return ctx->teedev->desc->ops->open_session(ctx, arg, normal_params, + num_normal_params, + ocall_param); } EXPORT_SYMBOL_GPL(tee_client_open_session); @@ -1184,9 +1483,22 @@ int tee_client_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, struct tee_param *param) { + struct tee_param *ocall_param = NULL; + struct tee_param *normal_params = NULL; + u32 num_normal_params = 0; + int rc; + if (!ctx->teedev->desc->ops->invoke_func) return -EINVAL; - return ctx->teedev->desc->ops->invoke_func(ctx, arg, param); + + rc = find_ocall_param(param, arg->num_params, &normal_params, + &num_normal_params, &ocall_param); + if (rc) + return rc; + + return ctx->teedev->desc->ops->invoke_func(ctx, arg, normal_params, + num_normal_params, + ocall_param); } EXPORT_SYMBOL_GPL(tee_client_invoke_func); diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 827ac3d0fea96c..82bf8b1cd1d29b 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -12,6 +12,14 @@ #include #include "tee_private.h" +/* extra references appended to shm object for registered shared memory */ +struct tee_shm_dmabuf_ref { + struct tee_shm shm; + struct dma_buf *dmabuf; + struct 
dma_buf_attachment *attach; + struct sg_table *sgt; +}; + static void tee_shm_release(struct tee_shm *shm) { struct tee_device *teedev = shm->ctx->teedev; @@ -22,7 +30,15 @@ static void tee_shm_release(struct tee_shm *shm) mutex_unlock(&teedev->mutex); } - if (shm->flags & TEE_SHM_POOL) { + if (shm->flags & TEE_SHM_EXT_DMA_BUF) { + struct tee_shm_dmabuf_ref *ref; + + ref = container_of(shm, struct tee_shm_dmabuf_ref, shm); + dma_buf_unmap_attachment(ref->attach, ref->sgt, + DMA_BIDIRECTIONAL); + dma_buf_detach(shm->dmabuf, ref->attach); + dma_buf_put(ref->dmabuf); + } else if (shm->flags & TEE_SHM_POOL) { struct tee_shm_pool_mgr *poolm; if (shm->flags & TEE_SHM_DMA_BUF) @@ -48,7 +64,6 @@ static void tee_shm_release(struct tee_shm *shm) teedev_ctx_put(shm->ctx); kfree(shm); - tee_device_put(teedev); } @@ -193,7 +208,8 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, int num_pages; unsigned long start; - if (flags != req_user_flags && flags != req_kernel_flags) + if (((flags & req_user_flags) != req_user_flags) && + ((flags & req_kernel_flags) != req_kernel_flags)) return ERR_PTR(-ENOTSUPP); if (!tee_device_get(teedev)) @@ -312,6 +328,98 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, } EXPORT_SYMBOL_GPL(tee_shm_register); +struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd) +{ + struct tee_shm_dmabuf_ref *ref; + void *rc; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + if (!tee_device_get(ctx->teedev)) + return ERR_PTR(-EINVAL); + + teedev_ctx_get(ctx); + + ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (!ref) { + rc = ERR_PTR(-ENOMEM); + goto err; + } + + ref->shm.ctx = ctx; + ref->shm.id = -1; + + ref->dmabuf = dma_buf_get(fd); + if (!ref->dmabuf) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + ref->attach = dma_buf_attach(ref->dmabuf, &ctx->teedev->dev); + if (IS_ERR_OR_NULL(ref->attach)) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + ref->sgt = dma_buf_map_attachment(ref->attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(ref->sgt)) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + if (sg_nents(ref->sgt->sgl) != 1) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + ref->shm.paddr = sg_dma_address(ref->sgt->sgl); + ref->shm.size = sg_dma_len(ref->sgt->sgl); + ref->shm.flags = TEE_SHM_DMA_BUF | TEE_SHM_EXT_DMA_BUF; + + mutex_lock(&ctx->teedev->mutex); + ref->shm.id = idr_alloc(&ctx->teedev->idr, &ref->shm, + 1, 0, GFP_KERNEL); + mutex_unlock(&ctx->teedev->mutex); + if (ref->shm.id < 0) { + rc = ERR_PTR(ref->shm.id); + goto err; + } + + /* export a dmabuf to later get a userland ref */ + exp_info.ops = &tee_shm_dma_buf_ops; + exp_info.size = ref->shm.size; + exp_info.flags = O_RDWR; + exp_info.priv = &ref->shm; + + ref->shm.dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(ref->shm.dmabuf)) { + rc = ERR_PTR(-EINVAL); + goto err; + } + + return &ref->shm; + +err: + if (ref) { + if (ref->shm.id >= 0) { + mutex_lock(&ctx->teedev->mutex); + idr_remove(&ctx->teedev->idr, ref->shm.id); + mutex_unlock(&ctx->teedev->mutex); + } + if (ref->sgt) + dma_buf_unmap_attachment(ref->attach, ref->sgt, + DMA_BIDIRECTIONAL); + if (ref->attach) + dma_buf_detach(ref->dmabuf, ref->attach); + if (ref->dmabuf) + dma_buf_put(ref->dmabuf); + } + kfree(ref); + teedev_ctx_put(ctx); + tee_device_put(ctx->teedev); + return rc; +} +EXPORT_SYMBOL_GPL(tee_shm_register_fd); + /** * tee_shm_get_fd() - Increase reference count and return file descriptor * @shm: Shared memory handle @@ -463,6 +571,17 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context 
*ctx, int id) } EXPORT_SYMBOL_GPL(tee_shm_get_from_id); +/** + * tee_shm_get() - Increase reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_get(struct tee_shm *shm) +{ + if (shm->flags & TEE_SHM_DMA_BUF) + get_dma_buf(shm->dmabuf); +} +EXPORT_SYMBOL_GPL(tee_shm_get); + /** * tee_shm_put() - Decrease reference count on a shared memory handle * @shm: Shared memory handle diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index d074302989ddd2..ef141997c7cd39 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -27,6 +27,7 @@ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ #define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ +#define TEE_SHM_OCALL BIT(7) /* Memory used for an OCALL */ struct device; struct tee_device; @@ -35,18 +36,22 @@ struct tee_shm_pool; /** * struct tee_context - driver specific context on file pointer data - * @teedev: pointer to this drivers struct tee_device - * @list_shm: List of shared memory object owned by this context - * @data: driver specific context data, managed by the driver - * @refcount: reference counter for this structure - * @releasing: flag that indicates if context is being released right now. - * It is needed to break circular dependency on context during - * shared memory release. - * @supp_nowait: flag that indicates that requests in this context should not - * wait for tee-supplicant daemon to be started if not present - * and just return with an error code. It is needed for requests - * that arises from TEE based kernel drivers that should be - * non-blocking in nature. + * @teedev: pointer to this drivers struct tee_device + * @list_shm: List of shared memory object owned by this context + * @data: driver specific context data, managed by the driver + * @refcount: reference counter for this structure + * @releasing: flag that indicates if context is being released right now. + * It is needed to break circular dependency on context during + * shared memory release. + * @supp_nowait: flag that indicates that requests in this context should + * not wait for tee-supplicant daemon to be started if not + * present and just return with an error code. It is needed + * for requests that arises from TEE based kernel drivers that + * should be non-blocking in nature. + * @cap_memref_null: flag indicating if the TEE Client support shared + * memory buffer with a NULL pointer. + * @cap_ocall: flag indicating that OP-TEE supports OCALLs, allowing TAs + * to invoke commands on their CA. 
*/ struct tee_context { struct tee_device *teedev; @@ -54,6 +59,8 @@ struct tee_context { struct kref refcount; bool releasing; bool supp_nowait; + bool cap_memref_null; + bool cap_ocall; }; struct tee_param_memref { @@ -80,6 +87,7 @@ struct tee_param { * struct tee_driver_ops - driver operations vtable * @get_version: returns version of driver * @open: called when the device file is opened + * @pre_release: called prior to context release, before release proper * @release: release this open file * @open_session: open a new session * @close_session: close a session @@ -94,14 +102,19 @@ struct tee_driver_ops { void (*get_version)(struct tee_device *teedev, struct tee_ioctl_version_data *vers); int (*open)(struct tee_context *ctx); + void (*pre_release)(struct tee_context *ctx); void (*release)(struct tee_context *ctx); int (*open_session)(struct tee_context *ctx, struct tee_ioctl_open_session_arg *arg, - struct tee_param *param); + struct tee_param *normal_param, + u32 num_normal_params, + struct tee_param *ocall_param); int (*close_session)(struct tee_context *ctx, u32 session); int (*invoke_func)(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, - struct tee_param *param); + struct tee_param *normal_param, + u32 num_normal_params, + struct tee_param *ocall_param); int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, struct tee_param *param); @@ -342,6 +355,16 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, size_t length, u32 flags); +/** + * tee_shm_register_fd() - Register shared memory from file descriptor + * + * @ctx: Context that allocates the shared memory + * @fd: shared memory file descriptor reference. 
+ * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd); + /** * tee_shm_is_registered() - Check if shared memory object in registered in TEE * @shm: Shared memory handle @@ -358,6 +381,12 @@ static inline bool tee_shm_is_registered(struct tee_shm *shm) */ void tee_shm_free(struct tee_shm *shm); +/** + * tee_shm_get() - Increase reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_get(struct tee_shm *shm); + /** * tee_shm_put() - Decrease reference count on a shared memory handle * @shm: Shared memory handle diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h index b619f37ee03e53..54faa0c23418c5 100644 --- a/include/uapi/linux/tee.h +++ b/include/uapi/linux/tee.h @@ -43,14 +43,20 @@ #define TEE_IOC_BASE 0 /* Flags relating to shared memory */ +#define TEE_IOCTL_SHM_NONE 0x0 /* no flags */ #define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */ #define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */ +#define TEE_IOCTL_SHM_OCALL 0x4 /* memory used for an OCALL */ #define TEE_MAX_ARG_SIZE 1024 #define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */ #define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */ #define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */ +#define TEE_GEN_CAP_MEMREF_NULL (1 << 3)/* NULL MemRef support */ +#define TEE_GEN_CAP_OCALL (1 << 4)/* Supports calls from TA to CA */ + +#define TEE_MEMREF_NULL (__u64)(-1) /* NULL MemRef Buffer */ /* * TEE Implementation ID @@ -118,6 +124,35 @@ struct tee_ioctl_shm_alloc_data { #define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \ struct tee_ioctl_shm_alloc_data) +/** + * struct tee_ioctl_shm_register_fd_data - Shared memory registering argument + * @fd: [in] file descriptor identifying the shared memory + * @size: [out] Size of shared memory to allocate + * @flags: [in] Flags to/from allocation. + * @id: [out] Identifier of the shared memory + * + * The flags field should currently be zero as input. Updated by the call + * with actual flags as defined by TEE_IOCTL_SHM_* above. + * This structure is used as argument for TEE_IOC_SHM_ALLOC below. + */ +struct tee_ioctl_shm_register_fd_data { + __s64 fd; + __u64 size; + __u32 flags; + __s32 id; +} __aligned(8); + +/** + * TEE_IOC_SHM_REGISTER_FD - register a shared memory from a file descriptor + * + * Returns a file descriptor on success or < 0 on failure + * + * The returned file descriptor refers to the shared memory object in kernel + * land. The shared memory is freed when the descriptor is closed. + */ +#define TEE_IOC_SHM_REGISTER_FD _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 8, \ + struct tee_ioctl_shm_register_fd_data) + /** * struct tee_ioctl_buf_data - Variable sized buffer * @buf_ptr: [in] A __user pointer to a buffer @@ -159,9 +194,14 @@ struct tee_ioctl_buf_data { /* Meta parameter carrying extra information about the message. */ #define TEE_IOCTL_PARAM_ATTR_META 0x100 +/* Parameter carrying information about an OCALL reply or request. 
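TEE_IOC_SHM_REGISTER_FD above lets a client hand an existing dma-buf (for example one exported by another device driver) to the TEE without copying it. A userspace sketch, assuming dmabuf_fd already refers to a dma-buf:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

static int share_dmabuf_with_tee(int tee_fd, int dmabuf_fd, int *shm_id)
{
        struct tee_ioctl_shm_register_fd_data data;
        int shm_fd;

        memset(&data, 0, sizeof(data));
        data.fd = dmabuf_fd;
        /* flags must be zero on input; the kernel fills in the result */

        shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);
        if (shm_fd < 0)
                return -1;

        *shm_id = data.id;   /* id to use as the @c member of memrefs */
        return shm_fd;       /* closing it releases the registration */
}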
*/ +#define TEE_IOCTL_PARAM_ATTR_OCALL 0x200 + /* Mask of all known attr bits */ #define TEE_IOCTL_PARAM_ATTR_MASK \ - (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META) + (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | \ + TEE_IOCTL_PARAM_ATTR_META | \ + TEE_IOCTL_PARAM_ATTR_OCALL) /* * Matches TEEC_LOGIN_* in GP TEE Client API @@ -200,6 +240,16 @@ struct tee_ioctl_buf_data { * a part of a shared memory by specifying an offset (@a) and size (@b) of * the object. To supply the entire shared memory object set the offset * (@a) to 0 and size (@b) to the previously returned size of the object. + * + * A client may need to present a NULL pointer in the argument + * passed to a trusted application in the TEE. + * This is also a requirement in GlobalPlatform Client API v1.0c + * (section 3.2.5 memory references), which can be found at + * http://www.globalplatform.org/specificationsdevice.asp + * + * If a NULL pointer is passed to a TA in the TEE, the (@c) + * IOCTL parameters value must be set to TEE_MEMREF_NULL indicating a NULL + * memory reference. */ struct tee_ioctl_param { __u64 attr; @@ -244,6 +294,54 @@ struct tee_ioctl_open_session_arg { #define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \ struct tee_ioctl_buf_data) +/* + * Command sent to the CA to request allocation of shared memory to carry the + * parameters of an OCALL + * + * [in] param[0].u.value.b requested memory size + * [out] param[0].u.value.c SHM ID + * + * Note: [in] means from driver to CA, [out], from CA to driver. + */ +#define TEE_IOCTL_OCALL_CMD_SHM_ALLOC 1 + +/* + * Command sent to the CA to free previously allocated shared memory. + * + * [in] param[0].u.value.c SHM ID + * + * Note: [in] means from driver to CA. + */ +#define TEE_IOCTL_OCALL_CMD_SHM_FREE 2 + +/* + * Command sent to the CA to execute an OCALL by Id. + * + * [any] param[0..3].u.* carry OCALL parameters + */ +#define TEE_IOCTL_OCALL_CMD_INVOKE 3 + +/* + * Join the Id of the function that the TEE Client API must execute on behalf of + * the CA with the Id of the command that the CA must execute + * + * As an example, TEE_IOCTL_OCALL_MAKE_PAIR(TEE_IOCTL_OCALL_CMD_INVOKE, 10) + * means that the Client API must forward a function invocation to a CA-provided + * handler, and the handler must execute command Id '10', whose meaning is up to + * the user-defined contract between the CA & TA. + */ +#define TEE_IOCTL_OCALL_MAKE_PAIR(func, cmd) \ + (((__u64)(func) << 32) | (__u32)(cmd)) + +/* + * Get the Id of the function that the TEE Client API must execute on behalf of + * the CA + */ +#define TEE_IOCTL_OCALL_GET_FUNC(x) ((__u32)((x) >> 32)) + +/* Get the Id of the command that the CA must execute */ +#define TEE_IOCTL_OCALL_GET_CMD(x) ((__u32)(x)) + /** * struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted * Application diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 504d2e431c6041..9998340d69c676 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -65,6 +65,7 @@ # git output parsing needs US English output, so first set backtick child process LANGUAGE my $git_command ='export LANGUAGE=en_US.UTF-8; git'; my $tabsize = 8; +my ${CONFIG_} = "CONFIG_"; sub help { my ($exitcode) = @_; @@ -127,6 +128,8 @@ sub help { --typedefsfile Read additional types from this file --color[=WHEN] Use colors 'always', 'never', or only when output is a terminal ('auto'). Default is 'auto'. 
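The MAKE_PAIR/GET_FUNC/GET_CMD macros above pack the operation the TEE Client API must perform together with the CA-defined command id into one 64-bit value carried by the OCALL request. A self-contained sketch of decoding that value on the CA side (where the value is read from is up to the Client API implementation):

#include <stdint.h>
#include <stdio.h>
#include <linux/tee.h>

static void dump_ocall_request(uint64_t func_cmd)
{
        uint32_t func = TEE_IOCTL_OCALL_GET_FUNC(func_cmd);
        uint32_t cmd = TEE_IOCTL_OCALL_GET_CMD(func_cmd);

        switch (func) {
        case TEE_IOCTL_OCALL_CMD_SHM_ALLOC:
                printf("OCALL: allocate shared memory for the TA\n");
                break;
        case TEE_IOCTL_OCALL_CMD_SHM_FREE:
                printf("OCALL: free previously allocated shared memory\n");
                break;
        case TEE_IOCTL_OCALL_CMD_INVOKE:
                printf("OCALL: run CA-defined command %u\n", cmd);
                break;
        default:
                printf("OCALL: unknown function %u\n", func);
        }
}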
+ --kconfig-prefix=WORD use WORD as a prefix for Kconfig symbols (default + ${CONFIG_}) -h, --help, --version display this help and exit When FILE is - read standard input. @@ -235,6 +238,7 @@ sub list_types { 'color=s' => \$color, 'no-color' => \$color, #keep old behaviors of -nocolor 'nocolor' => \$color, #keep old behaviors of -nocolor + 'kconfig-prefix=s' => \${CONFIG_}, 'h|help' => \$help, 'version' => \$help ) or help(1); @@ -6524,16 +6528,16 @@ sub process { } # check for IS_ENABLED() without CONFIG_ ($rawline for comments too) - if ($rawline =~ /\bIS_ENABLED\s*\(\s*(\w+)\s*\)/ && $1 !~ /^CONFIG_/) { + if ($rawline =~ /\bIS_ENABLED\s*\(\s*(\w+)\s*\)/ && $1 !~ /^${CONFIG_}/) { WARN("IS_ENABLED_CONFIG", - "IS_ENABLED($1) is normally used as IS_ENABLED(CONFIG_$1)\n" . $herecurr); + "IS_ENABLED($1) is normally used as IS_ENABLED(${CONFIG_}$1)\n" . $herecurr); } # check for #if defined CONFIG_ || defined CONFIG__MODULE - if ($line =~ /^\+\s*#\s*if\s+defined(?:\s*\(?\s*|\s+)(CONFIG_[A-Z_]+)\s*\)?\s*\|\|\s*defined(?:\s*\(?\s*|\s+)\1_MODULE\s*\)?\s*$/) { + if ($line =~ /^\+\s*#\s*if\s+defined(?:\s*\(?\s*|\s+)(${CONFIG_}[A-Z_]+)\s*\)?\s*\|\|\s*defined(?:\s*\(?\s*|\s+)\1_MODULE\s*\)?\s*$/) { my $config = $1; if (WARN("PREFER_IS_ENABLED", - "Prefer IS_ENABLED() to CONFIG_ || CONFIG__MODULE\n" . $herecurr) && + "Prefer IS_ENABLED() to ${CONFIG_} || ${CONFIG_}_MODULE\n" . $herecurr) && $fix) { $fixed[$fixlinenr] = "\+#if IS_ENABLED($config)"; } diff --git a/upstream-tee-subsys-patches.txt b/upstream-tee-subsys-patches.txt new file mode 100644 index 00000000000000..5d69a73616e39a --- /dev/null +++ b/upstream-tee-subsys-patches.txt @@ -0,0 +1,102 @@ +Patches relating to the TEE subsystem +===================================== + +This is a list of all the patches that relates to the TEE subsystem. The +text inside the brackets are the kernel version where it was introduced, +followed by the sha1 hash in the upstream kernel tree. 
+ +[v5.9-rc1] 5f178bb71e3a optee: enable support for multi-stage bus enumeration +[v5.9-rc1] 58df195cd47d optee: use uuid for sysfs driver entry +[v5.8-rc1] d8ed45c5dcd4 mmap locking API: use coccinelle to convert mmap_sem rwsem call sites +[v5.8-rc1] 60b4000f5464 tee: fix crypto select +[v5.8-rc1] c5b4312bea5d tee: optee: Add support for session login client UUID generation +[v5.8-rc1] e33bcbab16d1 tee: add support for session's client UUID generation +[v5.8-rc1] 104edb94cc4b tee: add private login method for kernel clients +[v5.8-rc1] 2a6ba3f794e8 tee: enable support to register kernel memory +[v5.8-rc1] 1115899e7aad tee: remove unnecessary NULL check in tee_shm_alloc() +[v5.7-rc1] 758ecf13a41a tee: tee_shm_op_mmap(): use TEE_SHM_USER_MAPPED +[v5.7-rc1] 5271b2011e44 tee: remove redundant teedev in struct tee_shm +[v5.7-rc1] f1bbacedb0af tee: don't assign shm id for private shms +[v5.7-rc1] c180f9bbe29a tee: remove unused tee_shm_priv_alloc() +[v5.7-rc1] 59a135f6fb66 tee: remove linked list of struct tee_shm +[v5.6] 36fa3e50085e tee: amdtee: out of bounds read in find_session() +[v5.6-rc5] b83685bceedb tee: amdtee: fix memory leak in amdtee_open_session() +[v5.6-rc4] 872d92dec353 tee: amdtee: amdtee depends on CRYPTO_DEV_CCP_DD +[v5.6-rc1] 48d625e4c4ce tee: fix memory allocation failure checks on drv_data and amdtee +[v5.6-rc1] 279c075dc1d2 tee: amdtee: remove redundant NULL check for pool +[v5.6-rc1] f9568eae9247 tee: amdtee: rename err label to err_device_unregister +[v5.6-rc1] 2929015535fa tee: amdtee: skip tee_device_unregister if tee_device_alloc fails +[v5.6-rc1] f4c58c3758f9 tee: amdtee: print error message if tee not present +[v5.6-rc1] 5ae63958a6de tee: amdtee: remove unused variable initialization +[v5.6-rc1] bade7e1fbd34 tee: amdtee: check TEE status during driver initialization +[v5.6-rc1] 757cc3e9ff1d tee: add AMD-TEE driver +[v5.6-rc1] 1a74fa3894e7 tee: allow compilation of tee subsystem for AMD CPUs +[v5.6-rc1] f349710e413a optee: model OP-TEE as a platform device/driver +[v5.6-rc1] 42aa7c6eb3eb drm/tee_shm: Drop dma_buf_k(unmap) support +[v5.5] 9e0caab8e0f9 tee: optee: Fix compilation issue with nommu +[v5.5-rc7] 5a769f6ff439 optee: Fix multi page dynamic shm pool alloc +[v5.5-rc1] 03212e347f94 tee: optee: fix device enumeration error handling +[v5.5-rc1] a249dd200d03 tee: optee: Fix dynamic shm pool allocations +[v5.5-rc1] 1832f2d8ff69 compat_ioctl: move more drivers to compat_ptr_ioctl +[v5.4-rc1] 9f02b8f61f29 tee: optee: add might_sleep for RPC requests +[v5.2-rc1] 9733b072a12a optee: allow to work without static shared memory +[v5.1-rc1] 32356d309c22 tee: optee: update optee_msg.h and optee_smc.h to dual license +[v5.1-rc1] 4f062dc1b759 tee: add cancellation support to client interface +[v5.1-rc1] 62ade1bed27c tee: optee: Fix unsigned comparison with less than zero +[v5.1-rc1] bb342f016862 tee: fix possible error pointer ctx dereferencing +[v5.1-rc1] 50ceca6894ad tee: optee: Initialize some structs using memset instead of braces +[v5.1-rc1] c3fa24af9244 tee: optee: add TEE bus device enumeration support +[v5.1-rc1] 0fc1db9d1059 tee: add bus driver framework for TEE based devices +[v5.1-rc1] 42bf4152d8a7 tee: add supp_nowait flag in tee_context struct +[v5.0] c7c0d8df0b94 tee: optee: add missing of_node_put after of_device_is_available +[v5.0-rc1] 3c15ddb97c77 tee: optee: log message if dynamic shm is enabled +[v5.0-rc1] b2d102bd0146 tee: optee: avoid possible double list_del() +[v4.20-rc1] 25559c22cef8 tee: add kernel internal client interface +[v4.20-rc1] db878f76b9ff 
tee: optee: take DT status property into account +[v4.19] 3249527f19d6 tee: optee: making OPTEE_SHM_NUM_PRIV_PAGES configurable via Kconfig +[v4.19] cf89fe88a676 tee: replace getnstimeofday64() with ktime_get_real_ts64() +[v4.17] ab9d3db5b320 tee: check shm references are consistent in offset/size +[v4.17] bb765d1c331f tee: shm: fix use-after-free via temporarily dropped reference +[v4.16] 5c5f80307ab2 tee: optee: report OP-TEE revision information +[v4.16] 6e112de04278 tee: optee: GET_OS_REVISION: document a2 as a build identifier +[v4.16] 7dd003aec201 correct max value for id allocation +[v4.16] ded4c39e93f3 arm/arm64: smccc: Make function identifiers an unsigned quantity +[v4.16] 2490cdf6435b tee: shm: Potential NULL dereference calling tee_shm_register() +[v4.16] c94f31b526fe tee: shm: don't put_page on null shm->pages +[v4.16] 80ec6f5de60b tee: shm: make function __tee_shm_alloc static +[v4.16] cdbcf83d29c1 tee: optee: check type of registered shared memory +[v4.16] 95ffe4ca4387 tee: add start argument to shm_register callback +[v4.16] f681e08f671a tee: optee: fix header dependencies +[v4.16] ef8e08d24ca8 tee: shm: inline tee_shm_get_id() +[v4.16] 217e0250cccb tee: use reference counting for tee_context +[v4.16] f58e236c9d66 tee: optee: enable dynamic SHM support +[v4.16] abd135ba215c tee: optee: add optee-specific shared pool implementation +[v4.16] d885cc5e0759 tee: optee: store OP-TEE capabilities in private data +[v4.16] 53a107c812de tee: optee: add registered buffers handling into RPC calls +[v4.16] 64cf9d8a672e tee: optee: add registered shared parameters handling +[v4.16] 06ca79179c4e tee: optee: add shared buffer registration functions +[v4.16] 3bb48ba5cd60 tee: optee: add page list manipulation functions +[v4.16] de5c6dfc43da tee: optee: Update protocol definitions +[v4.16] e0c69ae8bfb5 tee: shm: add page accessor functions +[v4.16] b25946ad951c tee: shm: add accessors for buffer size and page offset +[v4.16] 033ddf12bcf5 tee: add register user memory +[v4.16] e2aca5d8928a tee: flexible shared memory pool creation +[v4.16] 1647a5ac1754 optee: support asynchronous supplicant requests +[v4.16] f2aa97240c84 tee: add TEE_IOCTL_PARAM_ATTR_META +[v4.16] 84debcc53533 tee: add tee_param_is_memref() for driver use +[v4.15] f044113113dd optee: fix invalid of_node_put() in optee_driver_init() +[v4.14] 39e6519a3f13 tee: optee: sync with new naming of interrupts +[v4.14] 059cf566e123 tee: indicate privileged dev in gen_caps +[v4.14] a9980e947ec9 tee: optee: interruptible RPC sleep +[v4.14] 96e72ddeec45 tee: optee: add const to tee_driver_ops and tee_desc structures +[v4.14] 53e3ca5cee24 tee: tee_shm: Constify dma_buf_ops structures. +[v4.14] 999616b8536c tee: add forward declaration for struct device +[v4.14] efb14036bd7f tee: optee: fix uninitialized symbol 'parg' +[v4.12] e84188852a72 tee: add ARM_SMCCC dependency +[v4.12] 4fb0a5eb364d tee: add OP-TEE driver +[v4.12] 967c9cca2cc5 tee: generic TEE subsystem +[v4.5] 14457459f9ca ARM: 8480/2: arm64: add implementation for arm-smccc +[v4.5] b329f95d70f3 ARM: 8479/2: add implementation for arm-smccc +[v4.5] 98dd64f34f47 ARM: 8478/2: arm/arm64: add arm-smccc + +