author	Chris Wilson <chris@chris-wilson.co.uk>	2016-12-19 12:43:45 +0000
committer	Jani Nikula <jani.nikula@intel.com>	2016-12-20 16:30:47 +0200
commit	abb0deacb5a6713b918ac6395182cb27bb88be69 (patch)
tree	e6f1bb3c431498b97689930c89df9e513b93fef7 /drivers/gpu
parent	d8953c8326d87a337763ca547ad7db034a94ddb1 (diff)
drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping
If we at first do not succeed with attempting to remap our physical
pages using a coalesced scattergather list, try again with one
scattergather entry per page. This should help with swiotlb as it uses
a limited buffer size and only searches for contiguous chunks within
its buffer aligned up to the next boundary - i.e. we may prematurely
cause a failure as we are unable to utilize the unused space between
large chunks and trigger an error such as:

	i915 0000:00:02.0: swiotlb buffer is full (sz: 1630208 bytes)

Reported-by: Juergen Gross <jgross@suse.com>
Tested-by: Juergen Gross <jgross@suse.com>
Fixes: 871dfbd67d4e ("drm/i915: Allow compaction upto SWIOTLB max segment size")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: <drm-intel-fixes@lists.freedesktop.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20161219124346.550-1-chris@chris-wilson.co.uk
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
(cherry picked from commit d766ef53006c2c38a7fe2bef0904105a793383f2)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
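The retry shape the patch introduces can be sketched outside the kernel
as a small self-contained C program. Note this is an illustration, not
the driver code: build_table(), map_table() and free_table() are
hypothetical stand-ins for sg_alloc_table(), i915_gem_gtt_prepare_pages()
and sg_free_table(), and map_table() merely simulates a fragmented
swiotlb buffer that only accepts single-page segments.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for struct sg_table. */
struct table { size_t nents; };

/* Coalesced mode yields few large entries; single-page mode
 * yields one entry per page. */
static struct table *build_table(size_t page_count, size_t max_segment)
{
	struct table *t = malloc(sizeof(*t));

	if (!t)
		exit(1);
	t->nents = max_segment > PAGE_SIZE ? 1 : page_count;
	return t;
}

static void free_table(struct table *t)
{
	free(t);
}

/* Simulate a fragmented swiotlb: remapping only succeeds once
 * every page sits in its own PAGE_SIZE entry. */
static int map_table(const struct table *t, size_t page_count)
{
	return t->nents == page_count ? 0 : -1;
}

int main(void)
{
	const size_t page_count = 1630208 / PAGE_SIZE; /* 398 pages, as in the error above */
	size_t max_segment = 1UL << 20; /* try large coalesced chunks first */
	struct table *st;

rebuild_st:
	st = build_table(page_count, max_segment);
	if (map_table(st, page_count)) {
		free_table(st);
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE; /* fall back to single pages */
			goto rebuild_st;
		}
		fprintf(stderr, "Failed to DMA remap %zu pages\n", page_count);
		return 1;
	}
	printf("mapped %zu pages in %zu entries\n", page_count, st->nents);
	free_table(st);
	return 0;
}

The table is rebuilt from scratch rather than split in place because the
coalesced table has already been trimmed to fewer entries than pages;
the retry needs a fresh allocation with page_count entries.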
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3b6eb651d88a..c6f80bc3b7bc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2342,7 +2342,8 @@ static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	int page_count, i;
+	const unsigned long page_count = obj->base.size / PAGE_SIZE;
+	unsigned long i;
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
@@ -2368,7 +2369,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		kfree(st);
 		return ERR_PTR(-ENOMEM);
@@ -2427,8 +2428,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	i915_sg_trim(st);
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
-	if (ret)
-		goto err_pages;
+	if (ret) {
+		/* DMA remapping failed? One possible cause is that
+		 * it could not reserve enough large entries, asking
+		 * for PAGE_SIZE chunks instead may be helpful.
+		 */
+		if (max_segment > PAGE_SIZE) {
+			for_each_sgt_page(page, sgt_iter, st)
+				put_page(page);
+			sg_free_table(st);
+
+			max_segment = PAGE_SIZE;
+			goto rebuild_st;
+		} else {
+			dev_warn(&dev_priv->drm.pdev->dev,
+				 "Failed to DMA remap %lu pages\n",
+				 page_count);
+			goto err_pages;
+		}
+	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);
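For context, a standalone sketch (not from this patch) of the coalescing
that max_segment caps: physically contiguous pages are merged into one
entry until the cap is hit, so forcing max_segment down to PAGE_SIZE
degenerates the loop to one entry per page, exactly the shape the
fallback requests. The struct sg_entry type and build_sg() helper are
hypothetical simplifications of struct scatterlist.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical, simplified stand-in for struct scatterlist. */
struct sg_entry {
	unsigned long pfn;	/* first page frame of the chunk */
	size_t length;		/* chunk length in bytes */
};

/* Coalesce physically contiguous pages into entries of at most
 * max_segment bytes; returns the number of entries written. */
static size_t build_sg(const unsigned long *pfns, size_t n,
		       size_t max_segment, struct sg_entry *out)
{
	size_t nents = 0;

	for (size_t i = 0; i < n; i++) {
		struct sg_entry *last = nents ? &out[nents - 1] : NULL;

		if (last &&
		    last->pfn + last->length / PAGE_SIZE == pfns[i] &&
		    last->length + PAGE_SIZE <= max_segment) {
			last->length += PAGE_SIZE;	/* extend current chunk */
		} else {
			out[nents].pfn = pfns[i];	/* start a new chunk */
			out[nents].length = PAGE_SIZE;
			nents++;
		}
	}
	return nents;
}

int main(void)
{
	const unsigned long pfns[] = { 10, 11, 12, 40, 41, 90 };
	struct sg_entry sg[6];

	/* Coalesced: 3 entries (pages 10-12, 40-41, 90). */
	printf("coalesced:   %zu entries\n", build_sg(pfns, 6, 1UL << 20, sg));

	/* PAGE_SIZE segments: 6 entries, one per page. */
	printf("single page: %zu entries\n", build_sg(pfns, 6, PAGE_SIZE, sg));
	return 0;
}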