Commit ca9693a1 authored by Dave Airlie

Merge remote branch 'nouveau/drm-nouveau-next' of ../drm-nouveau-next into drm-core-next

* 'nouveau/drm-nouveau-next' of ../drm-nouveau-next: (93 commits)
  drm/nv50: fix a couple of vm init issues
  drm/nv04-nv40: Fix up PCI(E) GART DMA object bus address calculation.
  drm/nouveau: kick vram functions out into an "engine"
  drm/nouveau: allow gpuobj vinst to be a virtual address when necessary
  drm/nv50: tidy up PCIEGART implementation
  drm/nv50: enable non-contig vram allocations where requested
  drm/nv50: enable 4KiB pages for small vram allocations
  drm/nv50: implement global channel address space on new VM code
  drm/nv50: implement BAR1/BAR3 management on top of new VM code
  drm/nv50: import new vm code
  drm/nv50: implement custom vram mm
  drm/nouveau: Avoid potential race between nouveau_fence_update() and context takedown.
  drm/nouveau: fix use of drm_mm_node in semaphore object
  drm/nouveau: wrap calls to ttm_bo_validate()
  drm/nouveau: no need to zero dma objects, we fill them completely anyway
  drm/nouveau: introduce a util function to wait on reg != val
  drm/nouveau: implicitly insert non-DMA objects into RAMHT
  drm/nouveau: make fifo.create_context() responsible for mapping control regs
  drm/nouveau: Spin for a bit in nouveau_fence_wait() before yielding the CPU.
  drm/nouveau: Use WC memory on the AGP GART.
  ...
parents b921bae2 c45aadab
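Several of the shortlog entries above refer to small helpers whose implementations sit in diffs that are collapsed further down this page. For example, "introduce a util function to wait on reg != val" (which pairs with the nouveau_wait_until to nouveau_wait_eq rename visible in the nv_load_state_ext hunk below) boils down to a timed register poll. The following is a minimal sketch of that idea only, not the code from the collapsed nouveau_util.c diff; the helper name wait_reg and its exact parameters are assumptions, while nv_rd32() and the PTIMER engine's read() hook are existing nouveau interfaces.

/* Sketch only: the real nouveau_wait_eq()/nouveau_wait_ne() live in the
 * collapsed nouveau_util.c diff and may differ in detail. */
static bool
wait_reg(struct drm_device *dev, bool eq, uint64_t timeout_ns,
         uint32_t reg, uint32_t mask, uint32_t val)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
        uint64_t start = ptimer->read(dev);

        do {
                /* compare the masked register value against "val"; the
                 * sense of the comparison is selected by "eq" */
                if (((nv_rd32(dev, reg) & mask) == val) == eq)
                        return true;
        } while (ptimer->read(dev) - start < timeout_ns);

        return false;
}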
@@ -5,12 +5,13 @@
 ccflags-y := -Iinclude/drm
 nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_object.o nouveau_irq.o nouveau_notifier.o \
-             nouveau_sgdma.o nouveau_dma.o \
+             nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
              nouveau_dp.o nouveau_ramht.o \
              nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+             nouveau_mm.o nouveau_vm.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -18,14 +19,16 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
              nv40_grctx.o nv50_grctx.o \
+             nv84_crypt.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
              nv50_cursor.o nv50_display.o nv50_fbcon.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
              nv50_calc.o \
-             nv04_pm.o nv50_pm.o nva3_pm.o
+             nv04_pm.o nv50_pm.o nva3_pm.o \
+             nv50_vram.o nv50_vm.o

 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
 	return entry;
 }

-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+				 int heads, int or)
 {
 	struct dcb_entry *entry = new_dcb_entry(dcb);

-	entry->type = 0;
+	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	entry->location = DCB_LOC_ON_CHIP;
-	entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 2;
-	entry->i2c_index = LEGACY_I2C_PANEL;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
-	entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
-	entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
-	/*
-	 * For dvi-a either crtc probably works, but my card appears to only
-	 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
-	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
-	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
-	 * the monitor picks up the mode res ok and lights up, but no pixel
-	 * data appears, so the board manufacturer probably connected up the
-	 * sync lines, but missed the video traces / components
-	 *
-	 * with this introduction, dvi-a left as an exercise for the reader.
-	 */
-	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 1;
-	entry->i2c_index = LEGACY_I2C_TV;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	if (type != OUTPUT_ANALOG)
+		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	entry->or = or;
 }

 static bool
@@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 	return true;
 }

+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+	/* Apple iMac G4 NV17 */
+	if (of_machine_is_compatible("PowerMac4,5")) {
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		return;
+	}
+#endif
+
+	/* Make up some sane defaults */
+	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+				     all_heads, 0);
+
+	else if (bios->tmds.output0_script_ptr ||
+		 bios->tmds.output1_script_ptr)
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+				     all_heads, 1);
+}
+
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
 	/* this situation likely means a really old card, pre DCB */
 	if (dcbptr == 0x0) {
-		NV_INFO(dev, "Assuming a CRT output exists\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
@@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
 		 */
 		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
 				  "adding all possible outputs\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		/*
-		 * Attempt to detect TV before DVI because the test
-		 * for the former is more accurate and it rules the
-		 * latter out.
-		 */
-		if (nv04_tv_identify(dev,
-				     bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
-		else if (bios->tmds.output0_script_ptr ||
-			 bios->tmds.output1_script_ptr)
-			fabricate_dvi_i_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
@@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev)
 	if (ret)
 		return ret;

-	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+	ret = parse_dcb_table(dev, bios);
 	if (ret)
 		return ret;
[2 collapsed file diffs]
@@ -37,6 +37,8 @@
 #include "nouveau_connector.h"
 #include "nouveau_hw.h"

+static void nouveau_connector_hotplug(void *, int);
+
 static struct nouveau_encoder *
 find_encoder_by_type(struct drm_connector *connector, int type)
 {
@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector)
 }

 static void
-nouveau_connector_destroy(struct drm_connector *drm_connector)
+nouveau_connector_destroy(struct drm_connector *connector)
 {
-	struct nouveau_connector *nv_connector =
-		nouveau_connector(drm_connector);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_gpio_engine *pgpio;
 	struct drm_device *dev;

 	if (!nv_connector)
 		return;

 	dev = nv_connector->base.dev;
+	dev_priv = dev->dev_private;
 	NV_DEBUG_KMS(dev, "\n");

+	pgpio = &dev_priv->engine.gpio;
+	if (pgpio->irq_unregister) {
+		pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+				      nouveau_connector_hotplug, connector);
+	}
+
 	kfree(nv_connector->edid);
-	drm_sysfs_connector_remove(drm_connector);
-	drm_connector_cleanup(drm_connector);
-	kfree(drm_connector);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
 }

 static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 {
 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
 	struct nouveau_connector *nv_connector = NULL;
 	struct dcb_connector_table_entry *dcb = NULL;
 	struct drm_connector *connector;
@@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		break;
 	}

+	if (pgpio->irq_register) {
+		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+				    nouveau_connector_hotplug, connector);
+	}
+
 	drm_sysfs_connector_add(connector);
 	dcb->drm = connector;
 	return dcb->drm;
@@ -886,3 +902,29 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	return ERR_PTR(ret);
 }

+static void
+nouveau_connector_hotplug(void *data, int plugged)
+{
+	struct drm_connector *connector = data;
+	struct drm_device *dev = connector->dev;
+
+	NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+		drm_get_connector_name(connector));
+
+	if (connector->encoder && connector->encoder->crtc &&
+	    connector->encoder->crtc->enabled) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+		struct drm_encoder_helper_funcs *helper =
+			connector->encoder->helper_private;
+
+		if (nv_encoder->dcb->type == OUTPUT_DP) {
+			if (plugged)
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+			else
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+		}
+	}
+
+	drm_helper_hpd_irq_event(dev);
+}
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"

 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
 	.output_poll_changed = nouveau_fbcon_output_poll_changed,
 };

+int
+nouveau_vblank_enable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
+			    NV_PCRTC_INTR_0_VBLANK);
+
+	return 0;
+}
+
+void
+nouveau_vblank_disable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+			  struct nouveau_bo *new_bo)
+{
+	int ret;
+
+	ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail;
+
+	ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail_unreserve;
+
+	return 0;
+
+fail_unreserve:
+	ttm_bo_unreserve(&new_bo->bo);
+fail:
+	nouveau_bo_unpin(new_bo);
+	return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+			    struct nouveau_bo *new_bo,
+			    struct nouveau_fence *fence)
+{
+	nouveau_bo_fence(new_bo, fence);
+	ttm_bo_unreserve(&new_bo->bo);
+
+	nouveau_bo_fence(old_bo, fence);
+	ttm_bo_unreserve(&old_bo->bo);
+
+	nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+		       struct nouveau_bo *old_bo,
+		       struct nouveau_bo *new_bo,
+		       struct nouveau_page_flip_state *s,
+		       struct nouveau_fence **pfence)
+{
+	struct drm_device *dev = chan->dev;
+	unsigned long flags;
+	int ret;
+
+	/* Queue it to the pending list */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_add_tail(&s->head, &chan->nvsw.flip);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* Synchronize with the old framebuffer */
+	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+	if (ret)
+		goto fail;
+
+	/* Emit the pageflip */
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		goto fail;
+
+	BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+	OUT_RING(chan, 0);
+	FIRE_RING(chan);
+
+	ret = nouveau_fence_new(chan, pfence, true);
+	if (ret)
+		goto fail;
+
+	return 0;
+fail:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_del(&s->head);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+	struct nouveau_page_flip_state *s;
+	struct nouveau_channel *chan;
+	struct nouveau_fence *fence;
+	int ret;
+
+	if (dev_priv->engine.graph.accel_blocked)
+		return -ENODEV;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	/* Don't let the buffers go away while we flip */
+	ret = nouveau_page_flip_reserve(old_bo, new_bo);
+	if (ret)
+		goto fail_free;
+
+	/* Initialize a page flip struct */
+	*s = (struct nouveau_page_flip_state)
+		{ { }, s->event, nouveau_crtc(crtc)->index,
+		  fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+		  new_bo->bo.offset };
+
+	/* Choose the channel the flip will be handled in */
+	chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+	if (!chan)
+		chan = nouveau_channel_get_unlocked(dev_priv->channel);
+	mutex_lock(&chan->mutex);
+
+	/* Emit a page flip */
+	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+	nouveau_channel_put(&chan);
+	if (ret)
+		goto fail_unreserve;
+
+	/* Update the crtc struct and cleanup */
+	crtc->fb = fb;
+
+	nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+	nouveau_fence_unref(&fence);
+	return 0;
+
+fail_unreserve:
+	nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+	kfree(s);
+	return ret;
+}
+
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+			 struct nouveau_page_flip_state *ps)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state *s;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (list_empty(&chan->nvsw.flip)) {
+		NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EINVAL;
+	}
+
+	s = list_first_entry(&chan->nvsw.flip,
+			     struct nouveau_page_flip_state, head);
+	if (s->event) {
+		struct drm_pending_vblank_event *e = s->event;
+		struct timeval now;
+
+		do_gettimeofday(&now);
+		e->event.sequence = 0;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	list_del(&s->head);
+	*ps = *s;
+	kfree(s);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return 0;
+}
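The new nouveau_crtc_page_flip() entry point above only becomes reachable once it is plugged into the per-CRTC function table; that hookup happens in the nv04_crtc.c/nv50_crtc.c diffs, which are collapsed on this page. A hedged sketch of the expected wiring follows; everything other than the .page_flip assignment is an assumption based on the pre-existing nv50 CRTC code, not text taken from this merge.

/* Sketch only: the real table is in the collapsed nv50_crtc.c diff. */
static const struct drm_crtc_funcs nv50_crtc_funcs = {
        .cursor_set = nv50_crtc_cursor_set,
        .cursor_move = nv50_crtc_cursor_move,
        .gamma_set = nv50_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .page_flip = nouveau_crtc_page_flip,    /* new in this merge */
        .destroy = nv50_crtc_destroy,
};

nouveau_finish_page_flip() is the other half of the pair and is presumably invoked from the interrupt paths in the collapsed per-chipset files once the NV_SW_PAGE_FLIP software method retires.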
@@ -59,17 +59,11 @@ nouveau_dma_init(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
 	int ret, i;

 	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
-	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
-				    0x0039 : 0x5039, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(chan, NvM2MF, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+				    0x0039 : 0x5039);
 	if (ret)
 		return ret;
@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
 	struct bit_displayport_encoder_table *dpe;
 	int dpe_headerlen;
 	uint8_t config[4], status[3];
-	bool cr_done, cr_max_vs, eq_done;
+	bool cr_done, cr_max_vs, eq_done, hpd_state;
 	int ret = 0, i, tries, voltage;

 	NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
 	/* disable hotplug detect, this flips around on some panels during
 	 * link training.
 	 */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+	hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);

 	if (dpe->script0) {
 		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
 	}

 	/* re-enable hotplug detect */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);

 	return eq_done;
 }
@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
 int nouveau_perflvl_wr;
 module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);

+MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
+int nouveau_msi;
+module_param_named(msi, nouveau_msi, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -193,23 +197,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	NV_INFO(dev, "Idling channels...\n");
 	for (i = 0; i < pfifo->channels; i++) {
-		struct nouveau_fence *fence = NULL;
+		chan = dev_priv->channels.ptr[i];

-		chan = dev_priv->fifos[i];
-		if (!chan || (dev_priv->card_type >= NV_50 &&
-			      chan == dev_priv->fifos[0]))
-			continue;
-
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret) {
-			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
-				 chan->id);
-		}
+		if (chan && chan->pushbuf_bo)
+			nouveau_channel_idle(chan);
 	}

 	pgraph->fifo_access(dev, false);
@@ -219,17 +210,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	pfifo->unload_context(dev);
 	pgraph->unload_context(dev);

-	NV_INFO(dev, "Suspending GPU objects...\n");
-	ret = nouveau_gpuobj_suspend(dev);
+	ret = pinstmem->suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
 		goto out_abort;
 	}

-	ret = pinstmem->suspend(dev);
+	NV_INFO(dev, "Suspending GPU objects...\n");
+	ret = nouveau_gpuobj_suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
-		nouveau_gpuobj_suspend_cleanup(dev);
+		pinstmem->resume(dev);
 		goto out_abort;
 	}
@@ -294,17 +285,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
 		}
 	}

-	NV_INFO(dev, "Restoring GPU objects...\n");
-	nouveau_gpuobj_resume(dev);
-
 	NV_INFO(dev, "Reinitialising engines...\n");
 	engine->instmem.resume(dev);
 	engine->mc.init(dev);
 	engine->timer.init(dev);
 	engine->fb.init(dev);
 	engine->graph.init(dev);
+	engine->crypt.init(dev);
 	engine->fifo.init(dev);

+	NV_INFO(dev, "Restoring GPU objects...\n");
+	nouveau_gpuobj_resume(dev);
+
 	nouveau_irq_postinstall(dev);

 	/* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +305,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
 		int j;

 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			chan = dev_priv->fifos[i];
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->pushbuf_bo)
 				continue;
@@ -347,13 +339,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+		u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;

-		nv_crtc->cursor.set_offset(nv_crtc,
-					   nv_crtc->cursor.nvbo->bo.offset -
-					   dev_priv->vm_vram_base);
-
+		nv_crtc->cursor.set_offset(nv_crtc, offset);
 		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
 			nv_crtc->cursor_saved_y);
 	}

 	/* Force CLUT to get re-loaded during modeset */
@@ -393,6 +383,9 @@ static struct drm_driver driver = {
 	.irq_postinstall = nouveau_irq_postinstall,
 	.irq_uninstall = nouveau_irq_uninstall,
 	.irq_handler = nouveau_irq_handler,
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = nouveau_vblank_enable,
+	.disable_vblank = nouveau_vblank_disable,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.ioctls = nouveau_ioctls,
 	.fops = {
@@ -403,6 +396,7 @@ static struct drm_driver driver = {
 		.mmap = nouveau_ttm_mmap,
 		.poll = drm_poll,
 		.fasync = drm_fasync,
+		.read = drm_read,
 #if defined(CONFIG_COMPAT)
 		.compat_ioctl = nouveau_compat_ioctl,
 #endif
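The suspend path above now defers per-channel idling to nouveau_channel_idle(), whose implementation lives in a diff that is collapsed on this page (nouveau_channel.c). Judging only from the open-coded fence wait it replaces here, the helper is expected to look roughly like the following; treat it as a hypothetical reconstruction rather than the committed code.

/* Hypothetical reconstruction of nouveau_channel_idle(); the real helper
 * is added in a collapsed diff and may differ. */
void
nouveau_channel_idle(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *fence = NULL;
        int ret;

        /* emit a fence on the channel and wait for it to signal */
        ret = nouveau_fence_new(chan, &fence, true);
        if (!ret) {
                ret = nouveau_fence_wait(fence, false, false);
                nouveau_fence_unref(&fence);
        }

        if (ret)
                NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
}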
[1 collapsed file diff]
@@ -49,6 +49,96 @@
 #include "nouveau_fbcon.h"
 #include "nouveau_dma.h"

+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_fillrect(info, rect);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_fillrect(info, rect);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_copyarea(info, image);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_copyarea(info, image);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_imageblit(info, image);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_imageblit(info, image);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_imageblit(info, image);
+}
+
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
@@ -58,12 +148,17 @@ nouveau_fbcon_sync(struct fb_info *info)
 	struct nouveau_channel *chan = dev_priv->channel;
 	int ret, i;

-	if (!chan || !chan->accel_done ||
+	if (!chan || !chan->accel_done || in_interrupt() ||
 	    info->state != FBINFO_STATE_RUNNING ||
 	    info->flags & FBINFO_HWACCEL_DISABLED)
 		return 0;

-	if (RING_SPACE(chan, 4)) {
+	if (!mutex_trylock(&chan->mutex))
+		return 0;
+
+	ret = RING_SPACE(chan, 4);
+	if (ret) {
+		mutex_unlock(&chan->mutex);
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}
@@ -74,6 +169,7 @@ nouveau_fbcon_sync(struct fb_info *info)
 	OUT_RING(chan, 0);
 	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
 	FIRE_RING(chan);
+	mutex_unlock(&chan->mutex);

 	ret = -EBUSY;
 	for (i = 0; i < 100000; i++) {
@@ -97,9 +193,9 @@ static struct fb_ops nouveau_fbcon_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = nouveau_fbcon_fillrect,
+	.fb_copyarea = nouveau_fbcon_copyarea,
+	.fb_imageblit = nouveau_fbcon_imageblit,
 	.fb_sync = nouveau_fbcon_sync,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
@@ -108,29 +204,13 @@ static struct fb_ops nouveau_fbcon_ops = {
 	.fb_debug_leave = drm_fb_helper_debug_leave,
 };

-static struct fb_ops nv04_fbcon_ops = {
+static struct fb_ops nouveau_fbcon_sw_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv04_fbcon_fillrect,
-	.fb_copyarea = nv04_fbcon_copyarea,
-	.fb_imageblit = nv04_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-	.fb_debug_enter = drm_fb_helper_debug_enter,
-	.fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv50_fbcon_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv50_fbcon_fillrect,
-	.fb_copyarea = nv50_fbcon_copyarea,
-	.fb_imageblit = nv50_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -257,9 +337,9 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 			      FBINFO_HWACCEL_FILLRECT |
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
-	info->fbops = &nouveau_fbcon_ops;
-	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
-			       dev_priv->vm_vram_base;
+	info->fbops = &nouveau_fbcon_sw_ops;
+	info->fix.smem_start = dev->mode_config.fb_base +
+			       (nvbo->bo.mem.start << PAGE_SHIFT);
 	info->fix.smem_len = size;

 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
@@ -285,19 +365,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;

+	mutex_unlock(&dev->struct_mutex);
+
 	if (dev_priv->channel && !nouveau_nofbaccel) {
-		switch (dev_priv->card_type) {
-		case NV_C0:
-			break;
-		case NV_50:
-			nv50_fbcon_accel_init(info);
-			info->fbops = &nv50_fbcon_ops;
-			break;
-		default:
-			nv04_fbcon_accel_init(info);
-			info->fbops = &nv04_fbcon_ops;
-			break;
-		};
+		ret = -ENODEV;
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_accel_init(info);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_accel_init(info);
+
+		if (ret == 0)
+			info->fbops = &nouveau_fbcon_ops;
 	}

 	nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +387,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 						nouveau_fb->base.height,
 						nvbo->bo.offset, nvbo);

-	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
@@ -40,13 +40,13 @@ struct nouveau_fbdev {

 void nouveau_fbcon_restore(void);

-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);

 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
@@ -64,6 +64,7 @@ nouveau_fence_del(struct kref *ref)
 	struct nouveau_fence *fence =
 		container_of(ref, struct nouveau_fence, refcount);

+	nouveau_channel_ref(NULL, &fence->channel);
 	kfree(fence);
 }
@@ -76,14 +77,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
 	spin_lock(&chan->fence.lock);

-	if (USE_REFCNT(dev))
-		sequence = nvchan_rd32(chan, 0x48);
-	else
-		sequence = atomic_read(&chan->fence.last_sequence_irq);
+	/* Fetch the last sequence if the channel is still up and running */
+	if (likely(!list_empty(&chan->fence.pending))) {
+		if (USE_REFCNT(dev))
+			sequence = nvchan_rd32(chan, 0x48);
+		else
+			sequence = atomic_read(&chan->fence.last_sequence_irq);

-	if (chan->fence.sequence_ack == sequence)
-		goto out;
-	chan->fence.sequence_ack = sequence;
+		if (chan->fence.sequence_ack == sequence)
+			goto out;
+		chan->fence.sequence_ack = sequence;
+	}

 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		sequence = fence->sequence;
@@ -113,13 +117,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
 	if (!fence)
 		return -ENOMEM;
 	kref_init(&fence->refcount);
-	fence->channel = chan;
+	nouveau_channel_ref(chan, &fence->channel);

 	if (emit)
 		ret = nouveau_fence_emit(fence);

 	if (ret)
-		nouveau_fence_unref((void *)&fence);
+		nouveau_fence_unref(&fence);
 	*pfence = fence;
 	return ret;
 }
@@ -127,7 +131,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
 struct nouveau_channel *
 nouveau_fence_channel(struct nouveau_fence *fence)
 {
-	return fence ? fence->channel : NULL;
+	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
 }

 int
@@ -182,7 +186,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
 }

 void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
@@ -192,7 +196,7 @@ nouveau_fence_unref(void **sync_obj)
 }

 void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
@@ -201,7 +205,7 @@ nouveau_fence_ref(void *sync_obj)
 }

 bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 	struct nouveau_channel *chan = fence->channel;
@@ -214,13 +218,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 }

 int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 {
 	unsigned long timeout = jiffies + (3 * DRM_HZ);
+	unsigned long sleep_time = jiffies + 1;
 	int ret = 0;

 	while (1) {
-		if (nouveau_fence_signalled(sync_obj, sync_arg))
+		if (__nouveau_fence_signalled(sync_obj, sync_arg))
 			break;

 		if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +235,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 		__set_current_state(intr ? TASK_INTERRUPTIBLE
 					 : TASK_UNINTERRUPTIBLE);
-		if (lazy)
+		if (lazy && time_after_eq(jiffies, sleep_time))
 			schedule_timeout(1);

 		if (intr && signal_pending(current)) {
@@ -368,7 +373,7 @@ emit_semaphore(struct nouveau_channel *chan, int method,
 	kref_get(&sema->ref);
 	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);

 	return 0;
 }
@@ -380,33 +385,49 @@ nouveau_fence_sync(struct nouveau_fence *fence,
 	struct nouveau_channel *chan = nouveau_fence_channel(fence);
 	struct drm_device *dev = wchan->dev;
 	struct nouveau_semaphore *sema;
-	int ret;
+	int ret = 0;

-	if (likely(!fence || chan == wchan ||
-		   nouveau_fence_signalled(fence, NULL)))
-		return 0;
+	if (likely(!chan || chan == wchan ||
+		   nouveau_fence_signalled(fence)))
+		goto out;

 	sema = alloc_semaphore(dev);
 	if (!sema) {
 		/* Early card or broken userspace, fall back to
 		 * software sync. */
-		return nouveau_fence_wait(fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out;
+	}
+
+	/* try to take chan's mutex, if we can't take it right away
+	 * we have to fallback to software sync to prevent locking
+	 * order issues
+	 */
+	if (!mutex_trylock(&chan->mutex)) {
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out_unref;
 	}

 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
 	if (ret)
-		goto out;
+		goto out_unlock;

 	/* Signal the semaphore from chan */
 	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+	mutex_unlock(&chan->mutex);
+out_unref:
 	kref_put(&sema->ref, free_semaphore);
+out:
+	if (chan)
+		nouveau_channel_put_unlocked(&chan);
 	return ret;
 }

 int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
 	return 0;
 }
@@ -420,12 +441,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	int ret;

 	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(chan, NvSw, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
 	if (ret)
 		return ret;
@@ -437,13 +453,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	/* Create a DMA object for the shared cross-channel sync area. */
 	if (USE_SEMA(dev)) {
-		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     mem->start << PAGE_SHIFT,
-					     mem->size << PAGE_SHIFT,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &obj);
+					     mem->size, NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
 		if (ret)
 			return ret;
@@ -473,6 +488,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 {
 	struct nouveau_fence *tmp, *fence;

+	spin_lock(&chan->fence.lock);
+
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -482,6 +499,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
+
+	spin_unlock(&chan->fence.lock);
 }

 int
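The renames to __nouveau_fence_signalled()/__nouveau_fence_wait()/__nouveau_fence_unref() above keep the void-pointer signatures that TTM's sync_obj hooks require, while the callers in this merge switch to typed calls (nouveau_fence_signalled(fence), nouveau_fence_wait(fence, true, false), nouveau_fence_unref(&fence)). The typed wrappers themselves sit in a collapsed header diff; the following is their presumed shape, offered as a sketch rather than the committed declarations.

/* Presumed static-inline wrappers (the nouveau_drv.h diff is collapsed on
 * this page); the double-underscore variants keep the TTM signatures. */
static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
{
        return __nouveau_fence_signalled(obj, NULL);
}

static inline int
nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
{
        return __nouveau_fence_wait(obj, NULL, lazy, intr);
}

static inline void nouveau_fence_unref(struct nouveau_fence **obj)
{
        __nouveau_fence_unref((void **)obj);
}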
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 		return;
 	nvbo->gem = NULL;

-	if (unlikely(nvbo->cpu_filp))
-		ttm_bo_synccpu_write_release(bo);
-
 	if (unlikely(nvbo->pin_refcnt)) {
 		nvbo->pin_refcnt = 1;
 		nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
 	return 0;
 }

-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->card_type >= NV_50) {
-		switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
-		case 0x0000:
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7000:
-		case 0x7400:
-		case 0x7a00:
-		case 0xe000:
-			return true;
-		}
-	} else {
-		if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-			return true;
-	}
-
-	NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-	return false;
-}
-
 int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		flags |= TTM_PL_FLAG_VRAM;
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
 		flags |= TTM_PL_FLAG_SYSTEM;

-	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
+	}
+
+	if (req->channel_hint) {
+		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+	}

 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
 			      req->info.tile_mode, req->info.tile_flags, false,
 			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
 			      &nvbo);
+	if (chan)
+		nouveau_channel_put(&chan);
 	if (ret)
 		return ret;
@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
-		if (likely(fence)) {
-			struct nouveau_fence *prev_fence;

-			spin_lock(&nvbo->bo.bdev->fence_lock);
-			prev_fence = nvbo->bo.sync_obj;
-			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-			spin_unlock(&nvbo->bo.bdev->fence_lock);
-			nouveau_fence_unref((void *)&prev_fence);
-		}
+		nouveau_bo_fence(nvbo, fence);

 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			return -EINVAL;
 		}

-		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (ret == -EAGAIN)
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+			if (unlikely(ret == -EAGAIN))
+				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
 			drm_gem_object_unreference_unlocked(gem);
-			if (ret) {
-				NV_ERROR(dev, "fail reserve\n");
+			if (unlikely(ret)) {
+				if (ret != -ERESTARTSYS)
+					NV_ERROR(dev, "fail reserve\n");
 				return ret;
 			}
 			goto retry;
@@ -331,25 +301,6 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
-
-		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
-			validate_fini(op, NULL);
-
-			if (nvbo->cpu_filp == file_priv) {
-				NV_ERROR(dev, "bo %p mapped by process trying "
-					      "to validate it!\n", nvbo);
-				return -EINVAL;
-			}
-
-			mutex_unlock(&drm_global_mutex);
-			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			mutex_lock(&drm_global_mutex);
-			if (ret) {
-				NV_ERROR(dev, "fail wait_cpu\n");
-				return ret;
-			}
-
-			goto retry;
-		}
 	}

 	return 0;
@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 		}

 		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
-		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false, false);
+		ret = nouveau_bo_validate(nvbo, true, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail ttm_validate\n");
+			if (ret != -ERESTARTSYS)
+				NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
 		}
@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
-		NV_ERROR(dev, "validate_init\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate_init\n");
 		return ret;
 	}

 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate vram_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate gart_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate both_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;

-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);

 	req->vram_available = dev_priv->fb_aper_free;
 	req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}

 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}

 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}

 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push))
+	if (IS_ERR(push)) {
+		nouveau_channel_put(&chan);
 		return PTR_ERR(push);
+	}

 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
+		nouveau_channel_put(&chan);
 		return PTR_ERR(bo);
 	}
@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
-		NV_ERROR(dev, "validate: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate: %d\n", ret);
 		goto out;
 	}
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 out:
 	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
+	nouveau_fence_unref(&fence);
 	kfree(bo);
 	kfree(push);
@@ -750,6 +714,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		req->suffix1 = 0x00000000;
 	}

+	nouveau_channel_put(&chan);
 	return ret;
 }
@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);

-	if (nvbo->cpu_filp) {
-		if (nvbo->cpu_filp == file_priv)
-			goto out;
-
-		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
-		if (ret)
-			goto out;
-	}
-
-	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.bdev->fence_lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.bdev->fence_lock);
-	} else {
-		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
-		if (ret == 0)
-			nvbo->cpu_filp = file_priv;
-	}
-
-out:
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
 	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
@@ -809,26 +757,7 @@ int
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
-	struct drm_nouveau_gem_cpu_prep *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -ENOENT;
-	nvbo = nouveau_gem_object(gem);
-
-	if (nvbo->cpu_filp != file_priv)
-		goto out;
-	nvbo->cpu_filp = NULL;
-
-	ttm_bo_synccpu_write_release(&nvbo->bo);
-	ret = 0;
-
-out:
-	drm_gem_object_unreference_unlocked(gem);
-	return ret;
+	return 0;
 }

 int
@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
 		NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);

 		reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
-		if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+		if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
 			NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
 		else
 			NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	if (dev_priv->card_type == NV_10) {
 		/* Not waiting for vertical retrace before modifying
 		   CRE_53/CRE_54 causes lockups. */
-		nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-		nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+		nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+		nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
 	}

 	wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);

-	/* Setting 1 on this value gives you interrupts for every vblank period. */
-	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+	/* Enable vblank interrupts. */
+	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+		    (dev->vblank_enabled[head] ? 1 : 0));
 	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
 }
[7 collapsed file diffs]
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
 	nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);

 	if (dev_priv->card_type < NV_40) {
-		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
 		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else
 	if (dev_priv->card_type < NV_50) {
-		ctx = (gpuobj->cinst >> 4) |
+		ctx = (gpuobj->pinst >> 4) |
 		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else {
 		if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-			ctx = (gpuobj->cinst << 10) | 2;
+			ctx = (gpuobj->cinst << 10) | chan->id;
 		} else {
 			ctx = (gpuobj->cinst >> 4) |
 			      ((gpuobj->engine <<
@@ -214,18 +214,19 @@ nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
 	spin_unlock_irqrestore(&chan->ramht->lock, flags);
 }

-void
+int
 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
 {
 	struct nouveau_ramht_entry *entry;

 	entry = nouveau_ramht_remove_entry(chan, handle);
 	if (!entry)
-		return;
+		return -ENOENT;

 	nouveau_ramht_remove_hash(chan, entry->handle);
 	nouveau_gpuobj_ref(NULL, &entry->gpuobj);
 	kfree(entry);
+	return 0;
 }

 struct nouveau_gpuobj *
[40 collapsed file diffs]