
/drivers/staging/gma500/psb_gtt.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include "psb_drv.h"


/*
 *	GTT resource allocator - manage page mappings in GTT space
 */

/**
 *	psb_gtt_mask_pte	-	generate GART pte entry
 *	@pfn: page number to encode
 *	@type: type of memory in the GART
 *
 *	Set the GART entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
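
/*
 * Worked example (illustrative only, not part of the driver): encoding
 * pfn 0x1234 as a cached mapping
 *
 *	pte = psb_gtt_mask_pte(0x1234, PSB_MMU_CACHED_MEMORY);
 *
 * yields (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED, i.e. the
 * page frame's bus address with the valid and cached flag bits OR'd in.
 */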

/**
 *	psb_gtt_entry		-	find the GART entries for a gtt_range
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Given a gtt_range object return the GART offset of the page table
 *	entries for this gtt_range
 */
u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}
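
/*
 * Example (illustrative, assuming 4KiB pages): a range whose resource
 * starts 8MiB into the GTT aperture lies (8MiB >> PAGE_SHIFT) = 2048
 * entries in, so the returned pointer is dev_priv->gtt_map + 2048.
 */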

/**
 *	psb_gtt_insert	-	put an object into the GART
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Take our preallocated GTT range and insert the GEM object into
 *	the GART.
 *
 *	FIXME: gtt lock ?
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
	u32 *gtt_slot, pte;
	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	/* Make sure we have no alias present */
	wbinvd();

	/* Write our page entries into the GART itself */
	for (i = 0; i < numpages; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}

/**
 *	psb_gtt_remove	-	remove an object from the GART
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Remove a preallocated GTT range from the GART. Overwrite all the
 *	page table entries with the dummy page
 */

static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 *gtt_slot, pte;
	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

	for (i = 0; i < numpages; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
}

/**
 *	psb_gtt_attach_pages	-	attach and pin GEM pages
 *	@gt: the gtt range
 *
 *	Pin and build an in kernel list of the pages that back our GEM object.
 *	While we hold this the pages cannot be swapped out
 *
 *	FIXME: Do we need to cache flush when we update the GTT
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct inode *inode;
	struct address_space *mapping;
	int i;
	struct page *p;
	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

	WARN_ON(gt->pages);

	/* This is the shared memory object that backs the GEM resource */
	inode = gt->gem.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
	if (gt->pages == NULL)
		return -ENOMEM;
	for (i = 0; i < pages; i++) {
		/* FIXME: review flags later */
		p = read_cache_page_gfp(mapping, i,
					__GFP_COLD | GFP_KERNEL);
		if (IS_ERR(p))
			goto err;
		gt->pages[i] = p;
	}
	return 0;

err:
	while (i--)
		page_cache_release(gt->pages[i]);
	kfree(gt->pages);
	gt->pages = NULL;
	return PTR_ERR(p);
}

/**
 *	psb_gtt_detach_pages	-	release and unpin GEM pages
 *	@gt: the gtt range
 *
 *	Undo the effect of psb_gtt_attach_pages. At this point the pages
 *	must have been removed from the GART, as they could now be paged out
 *	and their bus addresses would change.
 *
 *	FIXME: Do we need to cache flush when we update the GTT
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	int i;
	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

	for (i = 0; i < pages; i++) {
		/* FIXME: do we need to force dirty */
		set_page_dirty(gt->pages[i]);
		/* Undo the reference we took when populating the table */
		page_cache_release(gt->pages[i]);
	}
	kfree(gt->pages);
	gt->pages = NULL;
}

/**
 *	psb_gtt_pin		-	pin pages into the GTT
 *	@gt: range to pin
 *
 *	Pin a set of pages into the GTT. The pins are refcounted so that
 *	multiple pins need multiple unpins to undo.
 *
 *	Non GEM backed objects treat this as a no-op as they are always GTT
 *	backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}
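
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	ret = psb_gtt_pin(gt);
 *	if (ret == 0) {
 *		... program the hardware with gt->offset ...
 *		psb_gtt_unpin(gt);
 *	}
 *
 * Every successful pin must be balanced by exactly one unpin; the backing
 * pages stay resident in the GART until the last pin is dropped.
 */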

/**
 *	psb_gtt_unpin		-	Drop a GTT pin requirement
 *	@gt: range to unpin
 *
 *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 *	will be removed from the GTT which will also drop the page references
 *	and allow the VM to clean up or page stuff.
 *
 *	Non GEM backed objects treat this as a no-op as they are always GTT
 *	backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}
	mutex_unlock(&dev_priv->gtt_mutex);
}

/*
 *	GTT resource allocator - allocate and manage GTT address space
 */

/**
 *	psb_gtt_alloc_range	-	allocate GTT address space
 *	@dev: Our DRM device
 *	@len: length (bytes) of address space required
 *	@name: resource name
 *	@backed: resource should be backed by stolen pages
 *
 *	Ask the kernel core to find us a suitable range of addresses
 *	to use for a GTT mapping.
 *
 *	Returns a gtt_range structure describing the object, or NULL on
 *	error. On successful return the resource is both allocated and marked
 *	as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
						const char *name, int backed)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->pg->stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->pg->stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	kref_init(&gt->kref);

	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, PAGE_SIZE, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}
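
/*
 * Usage sketch (illustrative, hypothetical values): allocate a 64KiB
 * GEM-backed range from the non-stolen part of the aperture and release
 * it again:
 *
 *	struct gtt_range *gt;
 *
 *	gt = psb_gtt_alloc_range(dev, 64 * 1024, "example", 0);
 *	if (gt)
 *		psb_gtt_free_range(dev, gt);
 */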

/**
 *	psb_gtt_destroy		-	final free up of a gtt
 *	@kref: the kref of the gtt
 *
 *	Called from the kernel kref put when the final reference to our
 *	GTT object is dropped. At that point we can free up the resources.
 *
 *	For now we handle mmap clean up here to work around limits in GEM
 */
static void psb_gtt_destroy(struct kref *kref)
{
	struct gtt_range *gt = container_of(kref, struct gtt_range, kref);

	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}

/**
 *	psb_gtt_kref_put	-	drop reference to a GTT object
 *	@gt: the GT being dropped
 *
 *	Drop a reference to a psb gtt
 */
void psb_gtt_kref_put(struct gtt_range *gt)
{
	kref_put(&gt->kref, psb_gtt_destroy);
}

/**
 *	psb_gtt_free_range	-	release GTT address space
 *	@dev: our DRM device
 *	@gt: a mapping created with psb_gtt_alloc_range
 *
 *	Release a resource that was allocated with psb_gtt_alloc_range
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	psb_gtt_kref_put(gt);
}
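
/*
 * Lifetime note (informational, derived from the code above):
 * psb_gtt_alloc_range() initialises the range's kref to 1, so a single
 * psb_gtt_kref_put() - or the psb_gtt_free_range() wrapper - drops the
 * last reference and lands in psb_gtt_destroy().
 */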


struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
{
	struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);

	if (!tmp)
		return NULL;

	init_rwsem(&tmp->sem);
	tmp->dev = dev;

	return tmp;
}

void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	/* FIXME: iounmap dev_priv->vram_addr etc */
	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	kfree(dev_priv->pg);
	dev_priv->pg = NULL;
}

int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	uint32_t vram_pages;
	uint32_t tt_pages;
	uint32_t *ttm_gtt_map;
	uint32_t dvmt_mode = 0;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	mutex_init(&dev_priv->gtt_mutex);

	dev_priv->pg = pg = psb_gtt_alloc(dev);
	if (pg == NULL)
		return -ENOMEM;

	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	/* FIXME: the video MMU has a HW bug that breaks accesses at
	 * 0x0D0000000, so force the GATT to start at 0xE0000000 */
	pg->mmu_gatt_start = 0xE0000000;
	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages =
	    pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
	    >> PAGE_SHIFT;

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
		pg->gatt_start, pg->gatt_pages/256);
	printk(KERN_INFO "GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
		pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
	printk(KERN_INFO "Stolen memory information\n");
	printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
	printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
		vram_stolen_size/1024);
	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
	printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);

	if (resume && ((gtt_pages != pg->gtt_pages) ||
		       (stolen_size != pg->stolen_size))) {
		DRM_ERROR("GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;
	dev_priv->gtt_map =
	    ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		DRM_ERROR("Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		DRM_ERROR("Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	DRM_DEBUG("vram kernel virtual address %p\n", dev_priv->vram_addr);

	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
		(pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;

	ttm_gtt_map = dev_priv->gtt_map + tt_pages / 2;

	/*
	 * insert vram stolen pages.
	 */

	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
	printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of gtt managed by IMG.
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < tt_pages / 2 - 1; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	/*
	 * Init rest of gtt managed by TTM.
	 */

	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	PSB_DEBUG_INIT("Initializing the rest of a total "
		       "of %d gtt pages.\n", pg->gatt_pages);

	for (; i < pg->gatt_pages - tt_pages / 2; ++i)
		iowrite32(pte, ttm_gtt_map + i);
	(void) ioread32(dev_priv->gtt_map + i - 1);

	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}
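
/*
 * Resulting GTT contents after psb_gtt_init() (informal summary of the
 * loops above): the first vram_pages entries map the stolen pages, the
 * remainder of the IMG-managed half points at the scratch page, and the
 * TTM-managed half (from ttm_gtt_map onwards) is likewise filled with
 * scratch-page entries until real objects are inserted.
 */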