diff --git a/Documentation/devicetree/bindings/reserved-memory/linaro,secure-heap.yaml b/Documentation/devicetree/bindings/reserved-memory/linaro,secure-heap.yaml
new file mode 100644
index 00000000000000..80522a4e298948
--- /dev/null
+++ b/Documentation/devicetree/bindings/reserved-memory/linaro,secure-heap.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reserved-memory/linaro,secure-heap.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Linaro Secure DMABUF Heap
+
+maintainers:
+  - Olivier Masse <olivier.masse@nxp.com>
+
+description:
+  Linaro OP-TEE firmware needs a reserved memory region for the
+  Secure Data Path feature (aka SDP).
+  The purpose is to provide a secure memory heap which allows the
+  non-secure OS to allocate/free secure buffers.
+  The TEE is responsible for protecting the SDP memory buffers.
+  A TEE Trusted Application can access secure memory references
+  provided as parameters (DMABUF file descriptors).
+
+allOf:
+  - $ref: "reserved-memory.yaml"
+
+properties:
+  compatible:
+    const: linaro,secure-heap
+
+  reg:
+    description:
+      Region of memory reserved for the OP-TEE SDP feature
+
+  no-map:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      Avoid creating a virtual mapping of the region as part of the OS'
+      standard mapping of system memory.
+
+unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - no-map
+
+examples:
+  - |
+    reserved-memory {
+      #address-cells = <2>;
+      #size-cells = <2>;
+
+      sdp@3e800000 {
+        compatible = "linaro,secure-heap";
+        no-map;
+        reg = <0 0x3E800000 0 0x00400000>;
+      };
+    };
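[Note: for orientation, once such a region is declared and the secure heap driver below has registered it, user space allocates from it through the standard DMA-BUF heap ioctl. A minimal sketch, assuming the heap device ends up named /dev/dma_heap/sdp (node-name truncation at '@' is done by rmem_secure_heap_setup() later in this series):

/* Sketch: allocate a secure buffer from the DMA-BUF heap added by
 * this series. The device path is an assumption derived from the
 * node name handling in secure_heap.c below.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int alloc_secure_buf(size_t len)
{
	struct dma_heap_allocation_data data;
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/sdp", O_RDWR);	/* path: hypothetical */
	if (heap_fd < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.len = len;
	data.fd_flags = O_RDWR | O_CLOEXEC;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	/* dma-buf fd; can be passed to the TEE via TEE_IOC_SHM_REGISTER_FD */
	return data.fd;
}
]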
diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi
index 1cfc2f011e7043..776ae3abb1327c 100644
--- a/arch/arm/boot/dts/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/stm32mp151.dtsi
@@ -35,6 +35,14 @@
 		method = "smc";
 	};
 
+	firmware {
+		optee: optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+			status = "disabled";
+		};
+	};
+
 	intc: interrupt-controller@a0021000 {
 		compatible = "arm,cortex-a7-gic";
 		#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/stm32mp157c-dk2.dts b/arch/arm/boot/dts/stm32mp157c-dk2.dts
index 2bc92ef3aeb957..60075aa1d6a85d 100644
--- a/arch/arm/boot/dts/stm32mp157c-dk2.dts
+++ b/arch/arm/boot/dts/stm32mp157c-dk2.dts
@@ -16,6 +16,13 @@
 	model = "STMicroelectronics STM32MP157C-DK2 Discovery Board";
 	compatible = "st,stm32mp157c-dk2", "st,stm32mp157";
 
+	reserved-memory {
+		optee_memory: optee@de000000 {
+			reg = <0xde000000 0x02000000>;
+			no-map;
+		};
+	};
+
 	aliases {
 		ethernet0 = &ethernet0;
 		serial0 = &uart4;
@@ -99,3 +106,7 @@
 	pinctrl-2 = <&usart2_idle_pins_c>;
 	status = "disabled";
 };
+
+&optee {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index 46b471d09c50c4..0242aab94b3634 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -70,6 +70,11 @@
 		reg = <0xe8000000 0x8000000>;
 		no-map;
 	};
+
+	optee_memory: optee@fe000000 {
+		reg = <0xfe000000 0x2000000>;
+		no-map;
+	};
 };
 
 aliases {
@@ -320,6 +325,10 @@
 	status = "okay";
 };
 
+&optee {
+	status = "okay";
+};
+
 &pwr_regulators {
 	vdd-supply = <&vdd>;
 	vdd_3v3_usbfs-supply = <&vdd_usb>;
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index fbf13f7c2baf5d..b94f907d439e1e 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -22,11 +22,14 @@
 	aliases {
 		serial0 = &v2m_serial0;
-		serial1 = &v2m_serial1;
 		serial2 = &v2m_serial2;
 		serial3 = &v2m_serial3;
 	};
 
+	ftpm {
+		compatible = "microsoft,ftpm";
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -67,6 +70,17 @@
 			      <0x00000008 0x80000000 0 0x80000000>;
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		optee@83000000 {
+			reg = <0x00000000 0x83000000 0 0x01000000>;
+			no-map;
+		};
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
@@ -196,14 +210,6 @@
 			clock-names = "uartclk", "apb_pclk";
 		};
 
-		v2m_serial1: serial@a0000 {
-			compatible = "arm,pl011", "arm,primecell";
-			reg = <0x0a0000 0x1000>;
-			interrupts = <6>;
-			clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
-			clock-names = "uartclk", "apb_pclk";
-		};
-
 		v2m_serial2: serial@b0000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x0b0000 0x1000>;
@@ -227,4 +233,12 @@
 			};
 		};
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
+
 };
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index a2635b14da3098..d2f967b5d7ebec 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -800,6 +800,18 @@
 			      <0x00000008 0x80000000 0x1 0x80000000>;
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* Shared memory between secure and non-secure world */
+		optee@fee00000 {
+			reg = <0x00000000 0xfee00000 0 0x00200000>;
+			no-map;
+		};
+	};
+
 	bus@8000000 {
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 15>;
@@ -827,4 +839,11 @@
 		interrupt-map-mask = <0 0>;
 		interrupt-map = <0 0 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
index 7d370dac4c8571..3bb161655313c4 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
@@ -6,6 +6,7 @@
  */
 
 /dts-v1/;
+/memreserve/ 0x00000000 0x04080000;
 
 #include <dt-bindings/gpio/gpio.h>
 #include "hi3798cv200.dtsi"
@@ -70,6 +71,13 @@
 		gpio = <&gpio6 7 0>;
 		enable-active-high;
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
 
 &ehci {
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 3df2afb2f63796..e4af0d914279ae 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -258,6 +258,17 @@
 		};
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		sdp@3e800000 {
+			compatible = "linaro,secure-heap";
+			no-map;
+			reg = <0 0x3E800000 0 0x00400000>;
+		};
+	};
+
 	sound_card {
 		compatible = "audio-graph-card";
 		dais = <&i2s0_port0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 4fa1e93302c751..6adcda06d3aca2 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -60,6 +60,13 @@
 		gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
 
 &mfg_async {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 2b7d331a458838..c3de90705c9d5e 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -534,15 +534,6 @@
 			reg = <0 0x10007000 0 0x100>;
 		};
 
-		timer: timer@10008000 {
compatible = "mediatek,mt8173-timer", - "mediatek,mt6577-timer"; - reg = <0 0x10008000 0 0x1000>; - interrupts = ; - clocks = <&infracfg CLK_INFRA_CLK_13M>, - <&topckgen CLK_TOP_RTC_SEL>; - }; - pwrap: pwrap@1000d000 { compatible = "mediatek,mt8173-pwrap"; reg = <0 0x1000d000 0 0x1000>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 30516dc0b70ec2..cc909b3ce01c9e 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1235,6 +1235,10 @@ CONFIG_CRYPTO_DEV_HISI_SEC2=m CONFIG_CRYPTO_DEV_HISI_ZIP=m CONFIG_CRYPTO_DEV_HISI_HPRE=m CONFIG_CRYPTO_DEV_HISI_TRNG=m +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_HEAPS_DEFERRED_FREE=y +CONFIG_DMABUF_HEAPS_PAGE_POOL=y +CONFIG_DMABUF_HEAPS_SECURE=y CONFIG_CMA_SIZE_MBYTES=32 CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index a5eef06c422644..0d1cf557e6848e 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -1,3 +1,13 @@ +menuconfig DMABUF_HEAPS_DEFERRED_FREE + bool "DMA-BUF heaps deferred-free library" + help + Choose this option to enable the DMA-BUF heaps deferred-free library. + +menuconfig DMABUF_HEAPS_PAGE_POOL + bool "DMA-BUF heaps page-pool library" + help + Choose this option to enable the DMA-BUF heaps page-pool library. + config DMABUF_HEAPS_SYSTEM bool "DMA-BUF System Heap" depends on DMABUF_HEAPS @@ -12,3 +22,12 @@ config DMABUF_HEAPS_CMA Choose this option to enable dma-buf CMA heap. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these regions, you should say Y here. + +config DMABUF_HEAPS_SECURE + tristate "DMA-BUF Secure Heap" + depends on DMABUF_HEAPS && DMABUF_HEAPS_DEFERRED_FREE + help + Choose this option to enable the secure dmabuf heap. The secure heap + pools are defined according to the DT. Heaps are allocated + in the pools using gen allocater. + If in doubt, say Y. diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index 974467791032ff..29b414ec7971ef 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,3 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o +obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL) += page_pool.o obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o +obj-$(CONFIG_DMABUF_HEAPS_SECURE) += secure_heap.o diff --git a/drivers/dma-buf/heaps/deferred-free-helper.c b/drivers/dma-buf/heaps/deferred-free-helper.c new file mode 100644 index 00000000000000..478faa319908e6 --- /dev/null +++ b/drivers/dma-buf/heaps/deferred-free-helper.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Deferred dmabuf freeing helper + * + * Copyright (C) 2020 Linaro, Ltd. + * + * Based on the ION page pool code + * Copyright (C) 2011 Google, Inc. 
+ */
+
+#include <linux/freezer.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/sched/signal.h>
+
+#include "deferred-free-helper.h"
+
+static LIST_HEAD(free_list);
+static size_t list_nr_pages;
+static wait_queue_head_t freelist_waitqueue;
+static struct task_struct *freelist_task;
+static DEFINE_SPINLOCK(free_list_lock);
+
+void deferred_free(struct deferred_freelist_item *item,
+		   void (*free)(struct deferred_freelist_item *,
+				enum df_reason),
+		   size_t nr_pages)
+{
+	unsigned long flags;
+
+	if (!nr_pages)
+		return;
+
+	INIT_LIST_HEAD(&item->list);
+	item->nr_pages = nr_pages;
+	item->free = free;
+
+	spin_lock_irqsave(&free_list_lock, flags);
+	list_add(&item->list, &free_list);
+	list_nr_pages += nr_pages;
+	spin_unlock_irqrestore(&free_list_lock, flags);
+	wake_up(&freelist_waitqueue);
+}
+EXPORT_SYMBOL_GPL(deferred_free);
+
+static size_t free_one_item(enum df_reason reason)
+{
+	unsigned long flags;
+	size_t nr_pages;
+	struct deferred_freelist_item *item;
+
+	spin_lock_irqsave(&free_list_lock, flags);
+	if (list_empty(&free_list)) {
+		spin_unlock_irqrestore(&free_list_lock, flags);
+		return 0;
+	}
+	item = list_first_entry(&free_list, struct deferred_freelist_item, list);
+	list_del(&item->list);
+	nr_pages = item->nr_pages;
+	list_nr_pages -= nr_pages;
+	spin_unlock_irqrestore(&free_list_lock, flags);
+
+	item->free(item, reason);
+	return nr_pages;
+}
+
+static unsigned long get_freelist_nr_pages(void)
+{
+	unsigned long nr_pages;
+	unsigned long flags;
+
+	spin_lock_irqsave(&free_list_lock, flags);
+	nr_pages = list_nr_pages;
+	spin_unlock_irqrestore(&free_list_lock, flags);
+	return nr_pages;
+}
+
+static unsigned long freelist_shrink_count(struct shrinker *shrinker,
+					   struct shrink_control *sc)
+{
+	return get_freelist_nr_pages();
+}
+
+static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
+					  struct shrink_control *sc)
+{
+	unsigned long total_freed = 0;
+
+	while (total_freed < sc->nr_to_scan) {
+		size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
+
+		if (!pages_freed)
+			break;
+
+		total_freed += pages_freed;
+	}
+
+	return total_freed;
+}
+
+static struct shrinker freelist_shrinker = {
+	.count_objects = freelist_shrink_count,
+	.scan_objects = freelist_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+	.batch = 0,
+};
+
+static int deferred_free_thread(void *data)
+{
+	while (true) {
+		wait_event_freezable(freelist_waitqueue,
+				     get_freelist_nr_pages() > 0);
+
+		free_one_item(DF_NORMAL);
+	}
+
+	return 0;
+}
+
+static int deferred_freelist_init(void)
+{
+	init_waitqueue_head(&freelist_waitqueue);
+	freelist_task = kthread_run(deferred_free_thread, NULL,
+				    "%s", "dmabuf-deferred-free-worker");
+	if (IS_ERR(freelist_task)) {
+		pr_err("Creating thread for deferred free failed\n");
+		return PTR_ERR(freelist_task);
+	}
+	sched_set_normal(freelist_task, 19);
+
+	return register_shrinker(&freelist_shrinker);
+}
+module_init(deferred_freelist_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/deferred-free-helper.h b/drivers/dma-buf/heaps/deferred-free-helper.h
new file mode 100644
index 00000000000000..4ed5893fdf3aa9
--- /dev/null
+++ b/drivers/dma-buf/heaps/deferred-free-helper.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Deferred dmabuf freeing helper
+ *
+ * Copyright (C) 2020 Linaro, Ltd.
+ *
+ * Based on the ION page pool code
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#ifndef DEFERRED_FREE_HELPER_H
+#define DEFERRED_FREE_HELPER_H
+
+/**
+ * enum df_reason - reason why the item was freed
+ *
+ * This provides a reason for why the free function was called
+ * on the item. This is useful when deferred_free is used in
+ * combination with a pagepool, so under pressure the page can
+ * be immediately freed.
+ *
+ * DF_NORMAL:         Normal deferred free
+ *
+ * DF_UNDER_PRESSURE: Free was called because the system
+ *                    is under memory pressure. Usually
+ *                    from a shrinker. Avoid allocating
+ *                    memory in the free call, as it may
+ *                    fail.
+ */
+enum df_reason {
+	DF_NORMAL,
+	DF_UNDER_PRESSURE,
+};
+
+/**
+ * struct deferred_freelist_item - item structure for the deferred freelist
+ *
+ * This is to be embedded in the structure for whatever you want to
+ * defer freeing on.
+ *
+ * @nr_pages: number of pages used by the item to be freed
+ * @free: function pointer to be called when freeing the item
+ * @list: list entry for the deferred list
+ */
+struct deferred_freelist_item {
+	size_t nr_pages;
+	void (*free)(struct deferred_freelist_item *i,
+		     enum df_reason reason);
+	struct list_head list;
+};
+
+/**
+ * deferred_free - add item to the deferred free list
+ *
+ * @item: Pointer to the deferred_freelist_item field of a structure
+ * @free: Function pointer to the free call
+ * @nr_pages: number of pages to be freed
+ */
+void deferred_free(struct deferred_freelist_item *item,
+		   void (*free)(struct deferred_freelist_item *i,
+				enum df_reason reason),
+		   size_t nr_pages);
+#endif
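[Note: the kernel-doc above leaves the embedding pattern implicit. The item is placed inside the owner's buffer structure and recovered with container_of() in the callback, exactly as secure_heap.c does later in this series. A hypothetical user, for illustration only:

/* Hypothetical user of the deferred-free helper. */
struct my_heap_buffer {
	size_t len;
	void *vaddr;
	struct deferred_freelist_item deferred_free;
};

static void my_heap_buf_free(struct deferred_freelist_item *item,
			     enum df_reason reason)
{
	struct my_heap_buffer *buf =
		container_of(item, struct my_heap_buffer, deferred_free);

	/* Under DF_UNDER_PRESSURE skip work that might allocate memory. */
	if (reason == DF_NORMAL)
		memset(buf->vaddr, 0, buf->len);

	kfree(buf);
}

static void my_heap_release(struct my_heap_buffer *buf)
{
	/* Queue the buffer; the helper's kthread (or its shrinker,
	 * under memory pressure) invokes my_heap_buf_free() later.
	 */
	deferred_free(&buf->deferred_free, my_heap_buf_free,
		      PAGE_ALIGN(buf->len) / PAGE_SIZE);
}
]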
diff --git a/drivers/dma-buf/heaps/page_pool.c b/drivers/dma-buf/heaps/page_pool.c
new file mode 100644
index 00000000000000..3dd4c3862dca2b
--- /dev/null
+++ b/drivers/dma-buf/heaps/page_pool.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA BUF page pool system
+ *
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * Based on the ION page pool code
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/freezer.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/sched/signal.h>
+#include "page_pool.h"
+
+static LIST_HEAD(pool_list);
+static DEFINE_MUTEX(pool_list_lock);
+
+static struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
+{
+	if (fatal_signal_pending(current))
+		return NULL;
+	return alloc_pages(pool->gfp_mask, pool->order);
+}
+
+static void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
+					struct page *page)
+{
+	__free_pages(page, pool->order);
+}
+
+static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
+{
+	int index;
+
+	if (PageHighMem(page))
+		index = POOL_HIGHPAGE;
+	else
+		index = POOL_LOWPAGE;
+
+	mutex_lock(&pool->mutex);
+	list_add_tail(&page->lru, &pool->items[index]);
+	pool->count[index]++;
+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+			    1 << pool->order);
+	mutex_unlock(&pool->mutex);
+}
+
+static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
+{
+	struct page *page;
+
+	mutex_lock(&pool->mutex);
+	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
+	if (page) {
+		pool->count[index]--;
+		list_del(&page->lru);
+		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+				    -(1 << pool->order));
+	}
+	mutex_unlock(&pool->mutex);
+
+	return page;
+}
+
+static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
+	if (!page)
+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
+
+	return page;
+}
+
+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	if (WARN_ON(!pool))
+		return NULL;
+
+	page = dmabuf_page_pool_fetch(pool);
+	if (!page)
+		page = dmabuf_page_pool_alloc_pages(pool);
+
+	return page;
+}
+EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);
+
+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
+{
+	if (WARN_ON(pool->order != compound_order(page)))
+		return;
+
+	dmabuf_page_pool_add(pool, page);
+}
+EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);
+
+static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
+{
+	int count = pool->count[POOL_LOWPAGE];
+
+	if (high)
+		count += pool->count[POOL_HIGHPAGE];
+
+	return count << pool->order;
+}
+
+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	int i;
+
+	if (!pool)
+		return NULL;
+
+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
+		pool->count[i] = 0;
+		INIT_LIST_HEAD(&pool->items[i]);
+	}
+	pool->gfp_mask = gfp_mask | __GFP_COMP;
+	pool->order = order;
+	mutex_init(&pool->mutex);
+
+	mutex_lock(&pool_list_lock);
+	list_add(&pool->list, &pool_list);
+	mutex_unlock(&pool_list_lock);
+
+	return pool;
+}
+EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
+
+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
+{
+	struct page *page;
+	int i;
+
+	/* Remove us from the pool list */
+	mutex_lock(&pool_list_lock);
+	list_del(&pool->list);
+	mutex_unlock(&pool_list_lock);
+
+	/* Free any remaining pages in the pool */
+	for (i = 0; i < POOL_TYPE_SIZE; i++) {
+		while ((page = dmabuf_page_pool_remove(pool, i)))
+			dmabuf_page_pool_free_pages(pool, page);
+	}
+
+	kfree(pool);
+}
+EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
+
+static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
+				      int nr_to_scan)
+{
+	int freed = 0;
+	bool high;
+
+	if (current_is_kswapd())
+		high = true;
+	else
+		high = !!(gfp_mask & __GFP_HIGHMEM);
+
+	if (nr_to_scan == 0)
+		return dmabuf_page_pool_total(pool, high);
+
+	while (freed < nr_to_scan) {
+		struct page *page;
+
+		/* Try to free low pages first */
+		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
+		if (!page)
+			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
+
+		if (!page)
+			break;
+
+		dmabuf_page_pool_free_pages(pool, page);
+		freed += (1 << pool->order);
+	}
+
+	return freed;
+}
+
+static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
+{
+	struct dmabuf_page_pool *pool;
+	int nr_total = 0;
+	int nr_freed;
+	bool only_scan = false;
+
+	if (!nr_to_scan)
+		only_scan = true;
+
+	mutex_lock(&pool_list_lock);
+	list_for_each_entry(pool, &pool_list, list) {
+		if (only_scan) {
+			nr_total += dmabuf_page_pool_do_shrink(pool,
+							       gfp_mask,
+							       nr_to_scan);
+		} else {
+			nr_freed = dmabuf_page_pool_do_shrink(pool,
+							      gfp_mask,
+							      nr_to_scan);
+			nr_to_scan -= nr_freed;
+			nr_total += nr_freed;
+			if (nr_to_scan <= 0)
+				break;
+		}
+	}
+	mutex_unlock(&pool_list_lock);
+
+	return nr_total;
+}
+
+static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
+						   struct shrink_control *sc)
+{
+	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
+}
+
+static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
+						  struct shrink_control *sc)
+{
+	if (sc->nr_to_scan == 0)
+		return 0;
+	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
+}
+
+static struct shrinker pool_shrinker = {
+	.count_objects = dmabuf_page_pool_shrink_count,
+	.scan_objects = dmabuf_page_pool_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+	.batch = 0,
+};
+
+static int dmabuf_page_pool_init_shrinker(void)
+{
+	return register_shrinker(&pool_shrinker);
+}
+module_init(dmabuf_page_pool_init_shrinker);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/page_pool.h b/drivers/dma-buf/heaps/page_pool.h
new file mode 100644
index 00000000000000..e3ec9eaacbc271
--- /dev/null
+++ b/drivers/dma-buf/heaps/page_pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMA BUF PagePool implementation
+ * Based on earlier ION code by Google
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2020 Linaro Ltd.
+ */
+
+#ifndef _DMABUF_PAGE_POOL_H
+#define _DMABUF_PAGE_POOL_H
+
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+/* page types we track in the pool */
+enum {
+	POOL_LOWPAGE,	/* Clean lowmem pages */
+	POOL_HIGHPAGE,	/* Clean highmem pages */
+
+	POOL_TYPE_SIZE
+};
+
+/**
+ * struct dmabuf_page_pool - pagepool struct
+ * @count[]: array of number of pages of that type in the pool
+ * @items[]: array of lists of pages of the specific type
+ * @mutex: lock protecting this struct and especially the count
+ *	   item list
+ * @gfp_mask: gfp_mask to use for allocations
+ * @order: order of pages in the pool
+ * @list: list node for the list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use
+ */
+struct dmabuf_page_pool {
+	int count[POOL_TYPE_SIZE];
+	struct list_head items[POOL_TYPE_SIZE];
+	struct mutex mutex;
+	gfp_t gfp_mask;
+	unsigned int order;
+	struct list_head list;
+};
+
+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask,
+						 unsigned int order);
+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool);
+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool);
+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page);
+
+#endif /* _DMABUF_PAGE_POOL_H */
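[Note: a short usage sketch of the pool API above, illustrative only (the secure heap in this series includes page_pool.h but allocates from a genalloc pool rather than from a page pool):

/* Illustrative fragment: an order-0 pool of GFP_KERNEL pages. Freed
 * pages go back to the pool; the registered shrinker drains all pools
 * under memory pressure via dmabuf_page_pool_do_shrink().
 */
static struct dmabuf_page_pool *pool;

static int my_heap_init(void)
{
	pool = dmabuf_page_pool_create(GFP_KERNEL, 0);
	if (!pool)
		return -ENOMEM;
	return 0;
}

static struct page *my_heap_get_page(void)
{
	/* Reuses a cached page when available, else falls back
	 * to alloc_pages().
	 */
	return dmabuf_page_pool_alloc(pool);
}

static void my_heap_put_page(struct page *page)
{
	dmabuf_page_pool_free(pool, page);
}
]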
diff --git a/drivers/dma-buf/heaps/secure_heap.c b/drivers/dma-buf/heaps/secure_heap.c
new file mode 100644
index 00000000000000..d3dd7906313eb5
--- /dev/null
+++ b/drivers/dma-buf/heaps/secure_heap.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF secure heap exporter
+ *
+ * Copyright 2021 NXP.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "deferred-free-helper.h"
+#include "page_pool.h"
+
+#define MAX_SECURE_HEAP 2
+#define MAX_HEAP_NAME_LEN 32
+
+struct secure_heap_buffer {
+	struct dma_heap *heap;
+	struct list_head attachments;
+	struct mutex lock;
+	unsigned long len;
+	struct sg_table sg_table;
+	int vmap_cnt;
+	struct deferred_freelist_item deferred_free;
+	void *vaddr;
+	bool uncached;
+};
+
+struct dma_heap_attachment {
+	struct device *dev;
+	struct sg_table *table;
+	struct list_head list;
+	bool no_map;
+	bool mapped;
+	bool uncached;
+};
+
+struct secure_heap_info {
+	struct gen_pool *pool;
+
+	bool no_map;
+};
+
+struct rmem_secure {
+	phys_addr_t base;
+	phys_addr_t size;
+
+	char name[MAX_HEAP_NAME_LEN];
+
+	bool no_map;
+};
+
+static struct rmem_secure secure_data[MAX_SECURE_HEAP] = {0};
+static unsigned int secure_data_count;
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+	struct sg_table *new_table;
+	int ret, i;
+	struct scatterlist *sg, *new_sg;
+
+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(new_table);
+		return ERR_PTR(ret);
+	}
+
+	new_sg = new_table->sgl;
+	for_each_sgtable_sg(table, sg, i) {
+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+		new_sg->dma_address = sg->dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		new_sg->dma_length = sg->dma_length;
+#endif
+		new_sg = sg_next(new_sg);
+	}
+
+	return new_table;
+}
+
+static int secure_heap_attach(struct dma_buf *dmabuf,
+			      struct dma_buf_attachment *attachment)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct secure_heap_info *info = dma_heap_get_drvdata(buffer->heap);
+	struct dma_heap_attachment *a;
+	struct sg_table *table;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = dup_sg_table(&buffer->sg_table);
+	if (IS_ERR(table)) {
+		kfree(a);
+		return PTR_ERR(table);
+	}
+
+	a->table = table;
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+	a->no_map = info->no_map;
+	a->mapped = false;
+	a->uncached = buffer->uncached;
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void secure_heap_detach(struct dma_buf *dmabuf,
+			       struct dma_buf_attachment *attachment)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a = attachment->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	sg_free_table(a->table);
+	kfree(a->table);
+	kfree(a);
+}
+
+static struct sg_table *secure_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+						enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+	struct sg_table *table = a->table;
+	int attr = 0;
+	int ret;
+
+	if (!a->no_map) {
+		if (a->uncached)
+			attr = DMA_ATTR_SKIP_CPU_SYNC;
+
+		ret = dma_map_sgtable(attachment->dev, table, direction, attr);
+		if (ret)
+			return ERR_PTR(ret);
+
+		a->mapped = true;
+	}
+
+	return table;
+}
+
+static void secure_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				      struct sg_table *table,
+				      enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+	int attr = 0;
+
+	if (!a->no_map) {
+		if (a->uncached)
+			attr = DMA_ATTR_SKIP_CPU_SYNC;
+
+		a->mapped = false;
+		dma_unmap_sgtable(attachment->dev, table, direction, attr);
+	}
+}
+
+static int secure_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+						enum dma_data_direction direction)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	if (!buffer->uncached) {
+		list_for_each_entry(a, &buffer->attachments, list) {
+			if (!a->mapped)
+				continue;
+			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+		}
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int secure_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					      enum dma_data_direction direction)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	if (!buffer->uncached) {
+		list_for_each_entry(a, &buffer->attachments, list) {
+			if (!a->mapped)
+				continue;
+			dma_sync_sgtable_for_device(a->dev, a->table, direction);
+		}
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int secure_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	struct sg_table *table = &buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	struct sg_page_iter piter;
+	int ret;
+
+	if (buffer->uncached)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+		struct page *page = sg_page_iter_page(&piter);
+
+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
+				      vma->vm_page_prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+static void *secure_heap_do_vmap(struct secure_heap_buffer *buffer)
+{
+	struct sg_table *table = &buffer->sg_table;
+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
+	struct page **tmp = pages;
+	struct sg_page_iter piter;
+	pgprot_t pgprot = PAGE_KERNEL;
+	void *vaddr;
+
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	if (buffer->uncached)
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	for_each_sgtable_page(table, &piter, 0) {
+		WARN_ON(tmp - pages >= npages);
+		*tmp++ = sg_page_iter_page(&piter);
+	}
+
+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
+	vfree(pages);
+
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+static int secure_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+	int ret = 0;
+
+	mutex_lock(&buffer->lock);
+	if (buffer->vmap_cnt) {
+		buffer->vmap_cnt++;
+		goto out;
+	}
+
+	vaddr = secure_heap_do_vmap(buffer);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto out;
+	}
+
+	buffer->vaddr = vaddr;
+	buffer->vmap_cnt++;
+	dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+	mutex_unlock(&buffer->lock);
+
+	return ret;
+}
+
+static void secure_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	if (!--buffer->vmap_cnt) {
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	dma_buf_map_clear(map);
+}
+
+static void secure_heap_zero_buffer(struct secure_heap_buffer *buffer)
+{
+	struct sg_table *sgt = &buffer->sg_table;
+	struct sg_page_iter piter;
+	struct page *p;
+	void *vaddr;
+
+	for_each_sgtable_page(sgt, &piter, 0) {
+		p = sg_page_iter_page(&piter);
+		vaddr = kmap_atomic(p);
+		memset(vaddr, 0, PAGE_SIZE);
+		kunmap_atomic(vaddr);
+	}
+}
+
+static void secure_heap_buf_free(struct deferred_freelist_item *item,
+				 enum df_reason reason)
+{
+	struct secure_heap_buffer *buffer;
+	struct secure_heap_info *info;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i;
+
+	buffer = container_of(item, struct secure_heap_buffer, deferred_free);
+	info = dma_heap_get_drvdata(buffer->heap);
+
+	if (!info->no_map) {
+		/* Zero the buffer pages before adding back to the pool */
+		if (reason == DF_NORMAL)
+			secure_heap_zero_buffer(buffer);
+	}
+
+	table = &buffer->sg_table;
+	for_each_sg(table->sgl, sg, table->nents, i)
+		gen_pool_free(info->pool, sg_dma_address(sg), sg_dma_len(sg));
+
+	sg_free_table(table);
+	kfree(buffer);
+}
+
+static void secure_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct secure_heap_buffer *buffer = dmabuf->priv;
+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+
+	deferred_free(&buffer->deferred_free, secure_heap_buf_free, npages);
+}
+
+static const struct dma_buf_ops secure_heap_buf_ops = {
+	.attach = secure_heap_attach,
+	.detach = secure_heap_detach,
+	.map_dma_buf = secure_heap_map_dma_buf,
+	.unmap_dma_buf = secure_heap_unmap_dma_buf,
+	.begin_cpu_access = secure_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access = secure_heap_dma_buf_end_cpu_access,
+	.mmap = secure_heap_mmap,
+	.vmap = secure_heap_vmap,
+	.vunmap = secure_heap_vunmap,
+	.release = secure_heap_dma_buf_release,
+};
+
+static struct dma_buf *secure_heap_do_allocate(struct dma_heap *heap,
+					       unsigned long len,
+					       unsigned long fd_flags,
+					       unsigned long heap_flags,
+					       bool uncached)
+{
+	struct secure_heap_buffer *buffer;
+	struct secure_heap_info *info = dma_heap_get_drvdata(heap);
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	unsigned long size = roundup(len, PAGE_SIZE);
+	struct dma_buf *dmabuf;
+	struct sg_table *table;
+	int ret = -ENOMEM;
+	unsigned long phy_addr;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	buffer->heap = heap;
+	buffer->len = size;
+	buffer->uncached = uncached;
+
+	phy_addr = gen_pool_alloc(info->pool, size);
+	if (!phy_addr)
+		goto free_buffer;
+
+	table = &buffer->sg_table;
+	if (sg_alloc_table(table, 1, GFP_KERNEL))
+		goto free_pool;
+
+	sg_set_page(table->sgl, phys_to_page(phy_addr), size, 0);
+	sg_dma_address(table->sgl) = phy_addr;
+	sg_dma_len(table->sgl) = size;
+
+	/* create the dmabuf */
+	exp_info.exp_name = dma_heap_get_name(heap);
+	exp_info.ops = &secure_heap_buf_ops;
+	exp_info.size = buffer->len;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer;
+	dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto free_pages;
+	}
+
+	return dmabuf;
+
+free_pages:
+	sg_free_table(table);
+
+free_pool:
+	gen_pool_free(info->pool, phy_addr, size);
+
+free_buffer:
+	mutex_destroy(&buffer->lock);
+	kfree(buffer);
+
+	return ERR_PTR(ret);
+}
+
+static struct dma_buf *secure_heap_allocate(struct dma_heap *heap,
+					    unsigned long len,
+					    unsigned long fd_flags,
+					    unsigned long heap_flags)
+{
+	/* use an uncached buffer by default */
+	return secure_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
+	/* to use a cached buffer instead:
+	 * return secure_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
+	 */
+}
+
+static const struct dma_heap_ops secure_heap_ops = {
+	.allocate = secure_heap_allocate,
+};
+
+static int secure_heap_add(struct rmem_secure *rmem)
+{
+	struct dma_heap *secure_heap;
+	struct dma_heap_export_info exp_info;
+	struct secure_heap_info *info = NULL;
+	struct gen_pool *pool = NULL;
+	int ret = -EINVAL;
+
+	if (rmem->base == 0 || rmem->size == 0) {
+		pr_err("secure_data base or size is not correct\n");
+		goto error;
+	}
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		pr_err("dmabuf info allocation failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!pool) {
+		pr_err("can't create gen pool\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	if (gen_pool_add(pool, rmem->base, rmem->size, -1) < 0) {
+		pr_err("failed to add memory into pool\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	info->pool = pool;
+	info->no_map = rmem->no_map;
+
+	exp_info.name = rmem->name;
+	exp_info.ops = &secure_heap_ops;
+	exp_info.priv = info;
+
+	secure_heap = dma_heap_add(&exp_info);
+	if (IS_ERR(secure_heap)) {
+		pr_err("dmabuf secure heap allocation failed\n");
+		ret = PTR_ERR(secure_heap);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	kfree(info);
+	if (pool)
+		gen_pool_destroy(pool);
+
+	return ret;
+}
+
+static int secure_heap_create(void)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < secure_data_count; i++) {
+		ret = secure_heap_add(&secure_data[i]);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int rmem_secure_heap_device_init(struct reserved_mem *rmem,
+					struct device *dev)
+{
+	dev_set_drvdata(dev, rmem);
+	return 0;
+}
+
+static void rmem_secure_heap_device_release(struct reserved_mem *rmem,
+					    struct device *dev)
+{
+	dev_set_drvdata(dev, NULL);
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+	.device_init = rmem_secure_heap_device_init,
+	.device_release = rmem_secure_heap_device_release,
+};
+
+static int __init rmem_secure_heap_setup(struct reserved_mem *rmem)
+{
+	if (secure_data_count < MAX_SECURE_HEAP) {
+		int name_len = 0;
+		const char *s = rmem->name;
+
+		secure_data[secure_data_count].base = rmem->base;
+		secure_data[secure_data_count].size = rmem->size;
+		secure_data[secure_data_count].no_map =
+			(of_get_flat_dt_prop(rmem->fdt_node, "no-map", NULL) != NULL);
+
+		while (name_len < MAX_HEAP_NAME_LEN) {
+			if ((*s == '@') || (*s == '\0'))
+				break;
+			name_len++;
+			s++;
+		}
+		if (name_len == MAX_HEAP_NAME_LEN)
+			name_len--;
+
+		strncpy(secure_data[secure_data_count].name, rmem->name, name_len);
+
+		rmem->ops = &rmem_dma_ops;
+		pr_info("Reserved memory: DMA buf secure pool %s at %pa, size %lu MiB\n",
+			secure_data[secure_data_count].name,
+			&rmem->base, (unsigned long)rmem->size / SZ_1M);
+
+		secure_data_count++;
+		return 0;
+	}
+
+	WARN_ONCE(1, "Cannot handle more than %u secure heaps\n", MAX_SECURE_HEAP);
+	return -EINVAL;
+}
+
+RESERVEDMEM_OF_DECLARE(secure_heap, "linaro,secure-heap", rmem_secure_heap_setup);
+
+module_init(secure_heap_create);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 3fc426dad2df30..1558b049a12681 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -356,6 +356,42 @@ tee_ioctl_shm_register(struct tee_context *ctx,
 	return ret;
 }
 
+static int
+tee_ioctl_shm_register_fd(struct tee_context *ctx,
+			  struct tee_ioctl_shm_register_fd_data __user *udata)
+{
+	struct tee_ioctl_shm_register_fd_data data;
+	struct tee_shm *shm;
+	int ret;
+
+	if (copy_from_user(&data, udata, sizeof(data)))
+		return -EFAULT;
+
+	/* Currently no input flags are supported */
+	if (data.flags)
+		return -EINVAL;
+
+	shm = tee_shm_register_fd(ctx, data.fd);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	data.id = shm->id;
+	data.flags = shm->flags;
+	data.size = shm->size;
+
+	if (copy_to_user(udata, &data, sizeof(data)))
+		ret = -EFAULT;
+	else
+		ret = tee_shm_get_fd(shm);
+
+	/*
+	 * The shared memory is freed once user space closes the returned
+	 * file descriptor, or immediately if tee_shm_get_fd() failed.
+	 */
+	tee_shm_put(shm);
+	return ret;
+}
+
 static int params_from_user(struct tee_context *ctx, struct tee_param *params,
 			    size_t num_params,
 			    struct tee_ioctl_param __user *uparams)
@@ -830,6 +866,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return tee_ioctl_shm_alloc(ctx, uarg);
 	case TEE_IOC_SHM_REGISTER:
 		return tee_ioctl_shm_register(ctx, uarg);
+	case TEE_IOC_SHM_REGISTER_FD:
+		return tee_ioctl_shm_register_fd(ctx, uarg);
 	case TEE_IOC_OPEN_SESSION:
 		return tee_ioctl_open_session(ctx, uarg);
 	case TEE_IOC_INVOKE:
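[Note: from user space, the new ioctl ties the two halves of this series together: a buffer allocated from the secure heap is handed to the TEE as shared memory. A hedged sketch against the uapi added below in include/uapi/linux/tee.h (the device path /dev/tee0 is an assumption):

/* Sketch: register an existing dma-buf fd with the TEE driver.
 * On success, data.id is the shared-memory id to use in TEE
 * parameters; the returned fd keeps the registration alive.
 */
#include <sys/ioctl.h>
#include <linux/tee.h>

int register_dmabuf_with_tee(int tee_fd, int dmabuf_fd, int *shm_id)
{
	struct tee_ioctl_shm_register_fd_data data = {
		.fd = dmabuf_fd,
		.flags = 0,	/* must be zero on input */
	};
	int shm_fd;

	shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);
	if (shm_fd < 0)
		return -1;

	*shm_id = data.id;	/* filled in by the kernel */
	/* Closing shm_fd later releases the reference taken here. */
	return shm_fd;
}
]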
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 499fccba3d74bd..ce528505cfe115 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -4,6 +4,7 @@
  */
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
 #include <linux/idr.h>
 #include <linux/mm.h>
@@ -12,6 +13,14 @@
 #include <linux/uio.h>
 #include "tee_private.h"
 
+/* extra references appended to shm object for registered shared memory */
+struct tee_shm_dmabuf_ref {
+	struct tee_shm shm;
+	struct dma_buf *dmabuf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+};
+
 static void release_registered_pages(struct tee_shm *shm)
 {
 	if (shm->pages) {
@@ -30,7 +39,16 @@
 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 {
-	if (shm->flags & TEE_SHM_POOL) {
+	if (shm->flags & TEE_SHM_EXT_DMA_BUF) {
+		struct tee_shm_dmabuf_ref *ref;
+
+		ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+		dma_buf_unmap_attachment(ref->attach, ref->sgt,
+					 DMA_BIDIRECTIONAL);
+		dma_buf_detach(ref->dmabuf, ref->attach);
+		dma_buf_put(ref->dmabuf);
+	} else if (shm->flags & TEE_SHM_POOL) {
 		struct tee_shm_pool_mgr *poolm;
 
 		if (shm->flags & TEE_SHM_DMA_BUF)
@@ -48,11 +66,10 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 		release_registered_pages(shm);
 	}
-
-	teedev_ctx_put(shm->ctx);
+	if (shm->ctx)
+		teedev_ctx_put(shm->ctx);
 
 	kfree(shm);
-
 	tee_device_put(teedev);
 }
@@ -137,7 +154,7 @@
  * tee_client_invoke_func(). The memory allocated is later freed with a
  * call to tee_shm_free().
 *
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */
 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
 {
@@ -145,6 +162,83 @@
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
 
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
+{
+	struct tee_shm_dmabuf_ref *ref;
+	int rc;
+
+	if (!tee_device_get(ctx->teedev))
+		return ERR_PTR(-EINVAL);
+
+	teedev_ctx_get(ctx);
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref) {
+		rc = -ENOMEM;
+		goto err_put_tee;
+	}
+
+	refcount_set(&ref->shm.refcount, 1);
+	ref->shm.ctx = ctx;
+	ref->shm.id = -1;
+
+	ref->dmabuf = dma_buf_get(fd);
+	if (IS_ERR(ref->dmabuf)) {
+		rc = PTR_ERR(ref->dmabuf);
+		goto err_free_ref;
+	}
+
+	ref->attach = dma_buf_attach(ref->dmabuf, &ref->shm.ctx->teedev->dev);
+	if (IS_ERR(ref->attach)) {
+		rc = PTR_ERR(ref->attach);
+		goto err_put_dmabuf;
+	}
+
+	ref->sgt = dma_buf_map_attachment(ref->attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(ref->sgt)) {
+		rc = PTR_ERR(ref->sgt);
+		goto err_detach;
+	}
+
+	if (sg_nents(ref->sgt->sgl) != 1) {
+		rc = -EINVAL;
+		goto err_unmap_attachment;
+	}
+
+	ref->shm.paddr = sg_dma_address(ref->sgt->sgl);
+	ref->shm.size = sg_dma_len(ref->sgt->sgl);
+	ref->shm.flags = TEE_SHM_DMA_BUF | TEE_SHM_EXT_DMA_BUF;
+
+	mutex_lock(&ref->shm.ctx->teedev->mutex);
+	ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
+				1, 0, GFP_KERNEL);
+	mutex_unlock(&ref->shm.ctx->teedev->mutex);
+	if (ref->shm.id < 0) {
+		rc = ref->shm.id;
+		goto err_unmap_attachment;
+	}
+
+	return &ref->shm;
+
+err_unmap_attachment:
+	dma_buf_unmap_attachment(ref->attach, ref->sgt, DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(ref->dmabuf, ref->attach);
+err_put_dmabuf:
+	dma_buf_put(ref->dmabuf);
+err_free_ref:
+	kfree(ref);
+err_put_tee:
+	teedev_ctx_put(ctx);
+	tee_device_put(ctx->teedev);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_fd);
+
 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 				 size_t length, u32 flags)
 {
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 5e1533ee3785bc..b019dcdad9b226 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -335,11 +335,21 @@ void *tee_get_drvdata(struct tee_device *teedev);
 * TEE_SHM_DMA_BUF global shared memory will be allocated and associated
 * with a dma-buf handle, else driver private memory.
 *
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
 
+/**
+ * tee_shm_register_fd() - Register shared memory from a file descriptor
+ *
+ * @ctx: Context that allocates the shared memory
+ * @fd: Shared memory file descriptor reference
+ *
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd);
+
 /**
 * tee_shm_register() - Register shared memory buffer
 * @ctx: Context that registers the shared memory
@@ -347,7 +357,7 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);
 * @length: Length of the shared buffer
 * @flags: Flags setting properties for the requested shared memory.
 *
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
 */
 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 				 size_t length, u32 flags);
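[Note: for kernel clients the same registration is available directly. A rough sketch of passing a dma-buf to a trusted application as a memref parameter (session setup is omitted and the argument wiring is illustrative):

/* Illustrative kernel-side use: wrap a dma-buf fd in a tee_shm and
 * pass it to a TA as a memref parameter.
 */
#include <linux/tee_drv.h>

static int pass_dmabuf_to_ta(struct tee_context *tee_ctx, int dmabuf_fd,
			     struct tee_ioctl_invoke_arg *arg)
{
	struct tee_param param = { };
	struct tee_shm *shm;
	int rc;

	shm = tee_shm_register_fd(tee_ctx, dmabuf_fd);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param.u.memref.shm = shm;
	param.u.memref.size = shm->size;

	rc = tee_client_invoke_func(tee_ctx, arg, &param);

	tee_shm_put(shm);	/* drop our reference; the TEE may keep its own */
	return rc;
}
]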
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 25a6c534beb1b6..6f84060ad00577 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -121,6 +121,35 @@ struct tee_ioctl_shm_alloc_data {
 #define TEE_IOC_SHM_ALLOC	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \
 				      struct tee_ioctl_shm_alloc_data)
 
+/**
+ * struct tee_ioctl_shm_register_fd_data - Shared memory registering argument
+ * @fd:		[in] File descriptor identifying the shared memory
+ * @size:	[out] Size of the registered shared memory
+ * @flags:	[in/out] Flags to/from registration
+ * @id:		[out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_REGISTER_FD below.
+ */
+struct tee_ioctl_shm_register_fd_data {
+	__s64 fd;
+	__u64 size;
+	__u32 flags;
+	__s32 id;
+} __aligned(8);
+
+/**
+ * TEE_IOC_SHM_REGISTER_FD - register shared memory from a file descriptor
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The returned file descriptor refers to the shared memory object in the
+ * kernel. The shared memory is freed when the descriptor is closed.
+ */
+#define TEE_IOC_SHM_REGISTER_FD	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 8, \
+				      struct tee_ioctl_shm_register_fd_data)
+
 /**
 * struct tee_ioctl_buf_data - Variable sized buffer
 * @buf_ptr:	[in] A __user pointer to a buffer