Commit 9356b50

Merge tag 'drm-misc-next-2025-06-19' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for 6.17:

UAPI Changes:
- Add Task Information for the wedge API

Cross-subsystem Changes:

Core Changes:
- Fix warnings related to export.h
- fbdev: Make CONFIG_FIRMWARE_EDID available on all architectures
- fence: Fix UAF issues
- format-helper: Improve tests

Driver Changes:
- ivpu: Add turbo flag, Add Wildcat Lake Support
- rz-du: Improve MIPI-DSI Support
- vmwgfx: fence improvement

Signed-off-by: Dave Airlie <[email protected]>
From: Maxime Ripard <[email protected]>
Link: https://lore.kernel.org/r/20250619-perfect-industrious-whippet-8ed3db@houat
2 parents 377b2f1 + 1a45ef0 commit 9356b50

222 files changed, 1488 insertions(+), 1433 deletions(-)


Documentation/gpu/drm-uapi.rst

Lines changed: 17 additions & 0 deletions

@@ -446,6 +446,23 @@ telemetry information (devcoredump, syslog). This is useful because the first
 hang is usually the most critical one which can result in consequential hangs or
 complete wedging.

+Task information
+----------------
+
+The information about which application (if any) was involved in the device
+wedging is useful for userspace if it wants to notify the user about what
+happened (e.g. the compositor displays a message to the user "The <task name>
+caused a graphical error and the system recovered") or to implement policies
+(e.g. a daemon may "ban" a task that keeps resetting the device). If the task
+information is available, the uevent will include ``PID=<pid>`` and
+``TASK=<task name>``. Otherwise, ``PID`` and ``TASK`` will not appear in the
+event string.
+
+The reliability of this information is driver and hardware specific, and it
+should be taken with caution regarding its precision. To get the full picture
+of what really happened, the devcoredump file should provide much more detailed
+information about the device state and about the event.
+
 Consumer prerequisites
 ----------------------
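
As a side note for consumers of this new uevent data, a minimal libudev-based sketch of the behaviour described above could look like the following. It is illustrative only and not part of this commit: the WEDGED, PID and TASK property names come from the documentation in this section, while handle_wedge() is a hypothetical policy hook.

#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <libudev.h>

/* Hypothetical policy hook: notify the user or "ban" the offending task. */
static void handle_wedge(const char *method, const char *pid, const char *task)
{
        printf("device wedged (%s), task %s (pid %s)\n",
               method, task ? task : "unknown", pid ? pid : "unknown");
}

int main(void)
{
        struct udev *udev = udev_new();
        struct udev_monitor *mon;
        struct pollfd pfd;

        if (!udev)
                return EXIT_FAILURE;

        /* Listen for kernel uevents coming from the drm subsystem. */
        mon = udev_monitor_new_from_netlink(udev, "kernel");
        if (!mon)
                return EXIT_FAILURE;
        udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
        udev_monitor_enable_receiving(mon);

        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;

        for (;;) {
                struct udev_device *dev;
                const char *wedged;

                if (poll(&pfd, 1, -1) <= 0)
                        continue;

                dev = udev_monitor_receive_device(mon);
                if (!dev)
                        continue;

                /* WEDGED carries the recovery method; PID/TASK are optional. */
                wedged = udev_device_get_property_value(dev, "WEDGED");
                if (wedged)
                        handle_wedge(wedged,
                                     udev_device_get_property_value(dev, "PID"),
                                     udev_device_get_property_value(dev, "TASK"));

                udev_device_unref(dev);
        }
}

Build with, for example, gcc wedge-monitor.c $(pkg-config --cflags --libs libudev); a real consumer would also match on the specific DRM device node and release the monitor and udev context on exit.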

arch/x86/kernel/setup.c

Lines changed: 4 additions & 0 deletions

@@ -213,8 +213,10 @@ arch_initcall(init_x86_sysctl);
  */
 struct screen_info screen_info;
 EXPORT_SYMBOL(screen_info);
+#if defined(CONFIG_FIRMWARE_EDID)
 struct edid_info edid_info;
 EXPORT_SYMBOL_GPL(edid_info);
+#endif

 extern int root_mountflags;

@@ -525,7 +527,9 @@ static void __init parse_boot_params(void)
 {
         ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
         screen_info = boot_params.screen_info;
+#if defined(CONFIG_FIRMWARE_EDID)
         edid_info = boot_params.edid_info;
+#endif
 #ifdef CONFIG_X86_32
         apm_info.bios = boot_params.apm_bios_info;
         ist_info = boot_params.ist_info;

drivers/accel/amdxdna/aie2_ctx.c

Lines changed: 1 addition & 1 deletion

@@ -566,7 +566,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
                 .size = MAX_CHAIN_CMDBUF_SIZE,
         };

-        abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
+        abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
         if (IS_ERR(abo)) {
                 ret = PTR_ERR(abo);
                 goto free_cmd_bufs;

drivers/accel/amdxdna/amdxdna_gem.c

Lines changed: 104 additions & 89 deletions

@@ -24,40 +24,79 @@
 MODULE_IMPORT_NS("DMA_BUF");

 static int
-amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
 {
         struct amdxdna_client *client = abo->client;
         struct amdxdna_dev *xdna = client->xdna;
         struct amdxdna_mem *mem = &abo->mem;
+        struct amdxdna_gem_obj *heap;
         u64 offset;
         u32 align;
         int ret;

+        mutex_lock(&client->mm_lock);
+
+        heap = client->dev_heap;
+        if (!heap) {
+                ret = -EINVAL;
+                goto unlock_out;
+        }
+
+        if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+                XDNA_ERR(xdna, "Invalid dev heap userptr");
+                ret = -EINVAL;
+                goto unlock_out;
+        }
+
+        if (mem->size == 0 || mem->size > heap->mem.size) {
+                XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+                         mem->size, heap->mem.size);
+                ret = -EINVAL;
+                goto unlock_out;
+        }
+
         align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
-        ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+        ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
                                          mem->size, align,
                                          0, DRM_MM_INSERT_BEST);
         if (ret) {
                 XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
-                return ret;
+                goto unlock_out;
         }

         mem->dev_addr = abo->mm_node.start;
-        offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
-        mem->userptr = abo->dev_heap->mem.userptr + offset;
-        mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
-        mem->nr_pages = mem->size >> PAGE_SHIFT;
-
-        if (use_vmap) {
-                mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
-                if (!mem->kva) {
-                        XDNA_ERR(xdna, "Failed to vmap");
-                        drm_mm_remove_node(&abo->mm_node);
-                        return -EFAULT;
-                }
-        }
+        offset = mem->dev_addr - heap->mem.dev_addr;
+        mem->userptr = heap->mem.userptr + offset;
+        mem->kva = heap->mem.kva + offset;

-        return 0;
+        drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+        mutex_unlock(&client->mm_lock);
+
+        return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+        mutex_destroy(&abo->lock);
+        kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+        struct amdxdna_gem_obj *heap;
+
+        mutex_lock(&abo->client->mm_lock);
+
+        drm_mm_remove_node(&abo->mm_node);
+
+        heap = abo->client->dev_heap;
+        drm_gem_object_put(to_gobj(heap));
+
+        mutex_unlock(&abo->client->mm_lock);
 }

 static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,

@@ -213,6 +252,20 @@ static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
         return ret;
 }

+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+        struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+        struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+        XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+        if (abo->pinned)
+                amdxdna_gem_unpin(abo);
+
+        amdxdna_gem_heap_free(abo);
+        drm_gem_object_release(gobj);
+        amdxdna_gem_destroy_obj(abo);
+}
+
 static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
                                 struct vm_area_struct *vma)
 {

@@ -374,19 +427,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
         if (abo->pinned)
                 amdxdna_gem_unpin(abo);

-        if (abo->type == AMDXDNA_BO_DEV) {
-                mutex_lock(&abo->client->mm_lock);
-                drm_mm_remove_node(&abo->mm_node);
-                mutex_unlock(&abo->client->mm_lock);
-
-                vunmap(abo->mem.kva);
-                drm_gem_object_put(to_gobj(abo->dev_heap));
-                drm_gem_object_release(gobj);
-                mutex_destroy(&abo->lock);
-                kfree(abo);
-                return;
-        }
-
         if (abo->type == AMDXDNA_BO_DEV_HEAP)
                 drm_mm_takedown(&abo->mm);

@@ -402,7 +442,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
 }

 static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
-        .free = amdxdna_gem_obj_free,
+        .free = amdxdna_gem_dev_obj_free,
 };

 static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {

@@ -527,6 +567,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
                             struct drm_file *filp)
 {
         struct amdxdna_client *client = filp->driver_priv;
+        struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
         struct amdxdna_dev *xdna = to_xdna_dev(dev);
         struct drm_gem_shmem_object *shmem;
         struct amdxdna_gem_obj *abo;

@@ -553,18 +594,26 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,

         shmem->map_wc = false;
         abo = to_xdna_obj(&shmem->base);
-
         abo->type = AMDXDNA_BO_DEV_HEAP;
         abo->client = client;
         abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
         drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);

+        ret = drm_gem_vmap(to_gobj(abo), &map);
+        if (ret) {
+                XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+                goto release_obj;
+        }
+        abo->mem.kva = map.vaddr;
+
         client->dev_heap = abo;
         drm_gem_object_get(to_gobj(abo));
         mutex_unlock(&client->mm_lock);

         return abo;

+release_obj:
+        drm_gem_object_put(to_gobj(abo));
 mm_unlock:
         mutex_unlock(&client->mm_lock);
         return ERR_PTR(ret);

@@ -573,69 +622,43 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
 struct amdxdna_gem_obj *
 amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
                          struct amdxdna_drm_create_bo *args,
-                         struct drm_file *filp, bool use_vmap)
+                         struct drm_file *filp)
 {
         struct amdxdna_client *client = filp->driver_priv;
         struct amdxdna_dev *xdna = to_xdna_dev(dev);
         size_t aligned_sz = PAGE_ALIGN(args->size);
-        struct amdxdna_gem_obj *abo, *heap;
+        struct amdxdna_gem_obj *abo;
         int ret;

-        mutex_lock(&client->mm_lock);
-        heap = client->dev_heap;
-        if (!heap) {
-                ret = -EINVAL;
-                goto mm_unlock;
-        }
-
-        if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
-                XDNA_ERR(xdna, "Invalid dev heap userptr");
-                ret = -EINVAL;
-                goto mm_unlock;
-        }
-
-        if (args->size > heap->mem.size) {
-                XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
-                         args->size, heap->mem.size);
-                ret = -EINVAL;
-                goto mm_unlock;
-        }
-
         abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
-        if (IS_ERR(abo)) {
-                ret = PTR_ERR(abo);
-                goto mm_unlock;
-        }
+        if (IS_ERR(abo))
+                return abo;
+
         to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
         abo->type = AMDXDNA_BO_DEV;
         abo->client = client;
-        abo->dev_heap = heap;
-        ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+
+        ret = amdxdna_gem_heap_alloc(abo);
         if (ret) {
                 XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
-                goto mm_unlock;
+                amdxdna_gem_destroy_obj(abo);
+                return ERR_PTR(ret);
         }

-        drm_gem_object_get(to_gobj(heap));
         drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);

-        mutex_unlock(&client->mm_lock);
         return abo;
-
-mm_unlock:
-        mutex_unlock(&client->mm_lock);
-        return ERR_PTR(ret);
 }

 static struct amdxdna_gem_obj *
 amdxdna_drm_create_cmd_bo(struct drm_device *dev,
                           struct amdxdna_drm_create_bo *args,
                           struct drm_file *filp)
 {
+        struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
         struct amdxdna_dev *xdna = to_xdna_dev(dev);
         struct drm_gem_shmem_object *shmem;
         struct amdxdna_gem_obj *abo;
-        struct iosys_map map;
         int ret;

         if (args->size > XDNA_MAX_CMD_BO_SIZE) {

@@ -692,7 +715,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
                 abo = amdxdna_drm_create_dev_heap(dev, args, filp);
                 break;
         case AMDXDNA_BO_DEV:
-                abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+                abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
                 break;
         case AMDXDNA_BO_CMD:
                 abo = amdxdna_drm_create_cmd_bo(dev, args, filp);

@@ -724,20 +747,13 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
         struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
         int ret;

+        if (abo->type == AMDXDNA_BO_DEV)
+                abo = abo->client->dev_heap;
+
         if (is_import_bo(abo))
                 return 0;

-        switch (abo->type) {
-        case AMDXDNA_BO_SHMEM:
-        case AMDXDNA_BO_DEV_HEAP:
-                ret = drm_gem_shmem_pin(&abo->base);
-                break;
-        case AMDXDNA_BO_DEV:
-                ret = drm_gem_shmem_pin(&abo->dev_heap->base);
-                break;
-        default:
-                ret = -EOPNOTSUPP;
-        }
+        ret = drm_gem_shmem_pin(&abo->base);

         XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
         return ret;

@@ -747,9 +763,6 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
 {
         int ret;

-        if (abo->type == AMDXDNA_BO_DEV)
-                abo = abo->dev_heap;
-
         mutex_lock(&abo->lock);
         ret = amdxdna_gem_pin_nolock(abo);
         mutex_unlock(&abo->lock);

@@ -759,12 +772,12 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)

 void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
 {
+        if (abo->type == AMDXDNA_BO_DEV)
+                abo = abo->client->dev_heap;
+
         if (is_import_bo(abo))
                 return;

-        if (abo->type == AMDXDNA_BO_DEV)
-                abo = abo->dev_heap;
-
         mutex_lock(&abo->lock);
         drm_gem_shmem_unpin(&abo->base);
         mutex_unlock(&abo->lock);

@@ -855,10 +868,12 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,

         if (is_import_bo(abo))
                 drm_clflush_sg(abo->base.sgt);
-        else if (abo->type == AMDXDNA_BO_DEV)
-                drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
-        else
+        else if (abo->mem.kva)
+                drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+        else if (abo->base.pages)
                 drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+        else
+                drm_WARN(&xdna->ddev, 1, "Can not get flush memory");

         amdxdna_gem_unpin(abo);