Commit df0af440 authored by Christian König

drm/radeon: remove struct radeon_bo_list

Just move all fields into radeon_cs_reloc, removing unused/duplicated fields.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 4d152646
This diff is collapsed.
...@@ -1274,12 +1274,12 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p, ...@@ -1274,12 +1274,12 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
value = radeon_get_ib_value(p, idx); value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff; tmp = value & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10); tmp += (((u32)reloc->gpu_offset) >> 10);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO; tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { if (reloc->tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) { if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n"); DRM_ERROR("Cannot src blit from microtiled surface\n");
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
...@@ -1325,7 +1325,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, ...@@ -1325,7 +1325,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
return r; return r;
} }
idx_value = radeon_get_ib_value(p, idx); idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
track->arrays[i + 0].esize = idx_value >> 8; track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj; track->arrays[i + 0].robj = reloc->robj;
...@@ -1337,7 +1337,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, ...@@ -1337,7 +1337,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
track->arrays[i + 1].robj = reloc->robj; track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24; track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F; track->arrays[i + 1].esize &= 0x7F;
...@@ -1351,7 +1351,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p, ...@@ -1351,7 +1351,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
return r; return r;
} }
idx_value = radeon_get_ib_value(p, idx); idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
track->arrays[i + 0].robj = reloc->robj; track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8; track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F; track->arrays[i + 0].esize &= 0x7F;
...@@ -1594,7 +1594,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1594,7 +1594,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb.robj = reloc->robj; track->zb.robj = reloc->robj;
track->zb.offset = idx_value; track->zb.offset = idx_value;
track->zb_dirty = true; track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case RADEON_RB3D_COLOROFFSET: case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0); r = radeon_cs_packet_next_reloc(p, &reloc, 0);
...@@ -1607,7 +1607,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1607,7 +1607,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->cb[0].robj = reloc->robj; track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value; track->cb[0].offset = idx_value;
track->cb_dirty = true; track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case RADEON_PP_TXOFFSET_0: case RADEON_PP_TXOFFSET_0:
case RADEON_PP_TXOFFSET_1: case RADEON_PP_TXOFFSET_1:
...@@ -1621,16 +1621,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1621,16 +1621,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_TXO_MACRO_TILE; tile_flags |= RADEON_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_TXO_MICRO_TILE_X2; tile_flags |= RADEON_TXO_MICRO_TILE_X2;
tmp = idx_value & ~(0x7 << 2); tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags; tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); ib[idx] = tmp + ((u32)reloc->gpu_offset);
} else } else
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].robj = reloc->robj; track->textures[i].robj = reloc->robj;
track->tex_dirty = true; track->tex_dirty = true;
break; break;
...@@ -1648,7 +1648,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1648,7 +1648,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->textures[0].cube_info[i].offset = idx_value; track->textures[0].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj; track->textures[0].cube_info[i].robj = reloc->robj;
track->tex_dirty = true; track->tex_dirty = true;
break; break;
...@@ -1666,7 +1666,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1666,7 +1666,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->textures[1].cube_info[i].offset = idx_value; track->textures[1].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj; track->textures[1].cube_info[i].robj = reloc->robj;
track->tex_dirty = true; track->tex_dirty = true;
break; break;
...@@ -1684,7 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1684,7 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->textures[2].cube_info[i].offset = idx_value; track->textures[2].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj; track->textures[2].cube_info[i].robj = reloc->robj;
track->tex_dirty = true; track->tex_dirty = true;
break; break;
...@@ -1702,9 +1702,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1702,9 +1702,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE; tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = idx_value & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
...@@ -1772,7 +1772,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ...@@ -1772,7 +1772,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case RADEON_PP_CNTL: case RADEON_PP_CNTL:
{ {
...@@ -1932,7 +1932,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, ...@@ -1932,7 +1932,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) { if (r) {
return r; return r;
...@@ -1946,7 +1946,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p, ...@@ -1946,7 +1946,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
track->num_arrays = 1; track->num_arrays = 1;
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2)); track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
......
...@@ -185,7 +185,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, ...@@ -185,7 +185,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb.robj = reloc->robj; track->zb.robj = reloc->robj;
track->zb.offset = idx_value; track->zb.offset = idx_value;
track->zb_dirty = true; track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case RADEON_RB3D_COLOROFFSET: case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0); r = radeon_cs_packet_next_reloc(p, &reloc, 0);
...@@ -198,7 +198,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, ...@@ -198,7 +198,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->cb[0].robj = reloc->robj; track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value; track->cb[0].offset = idx_value;
track->cb_dirty = true; track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case R200_PP_TXOFFSET_0: case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1: case R200_PP_TXOFFSET_1:
...@@ -215,16 +215,16 @@ int r200_packet0_check(struct radeon_cs_parser *p, ...@@ -215,16 +215,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE; tile_flags |= R200_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE; tile_flags |= R200_TXO_MICRO_TILE;
tmp = idx_value & ~(0x7 << 2); tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags; tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset); ib[idx] = tmp + ((u32)reloc->gpu_offset);
} else } else
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].robj = reloc->robj; track->textures[i].robj = reloc->robj;
track->tex_dirty = true; track->tex_dirty = true;
break; break;
...@@ -268,7 +268,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, ...@@ -268,7 +268,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
track->textures[i].cube_info[face - 1].offset = idx_value; track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj; track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true; track->tex_dirty = true;
break; break;
...@@ -287,9 +287,9 @@ int r200_packet0_check(struct radeon_cs_parser *p, ...@@ -287,9 +287,9 @@ int r200_packet0_check(struct radeon_cs_parser *p,
} }
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE; tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = idx_value & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
...@@ -362,7 +362,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, ...@@ -362,7 +362,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case RADEON_PP_CNTL: case RADEON_PP_CNTL:
{ {
......
...@@ -640,7 +640,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ...@@ -640,7 +640,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb[i].robj = reloc->robj; track->cb[i].robj = reloc->robj;
track->cb[i].offset = idx_value; track->cb[i].offset = idx_value;
track->cb_dirty = true; track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case R300_ZB_DEPTHOFFSET: case R300_ZB_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0); r = radeon_cs_packet_next_reloc(p, &reloc, 0);
...@@ -653,7 +653,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ...@@ -653,7 +653,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->zb.robj = reloc->robj; track->zb.robj = reloc->robj;
track->zb.offset = idx_value; track->zb.offset = idx_value;
track->zb_dirty = true; track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case R300_TX_OFFSET_0: case R300_TX_OFFSET_0:
case R300_TX_OFFSET_0+4: case R300_TX_OFFSET_0+4:
...@@ -682,16 +682,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ...@@ -682,16 +682,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) { if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); ((idx_value & ~31) + (u32)reloc->gpu_offset);
} else { } else {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE; tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE; tile_flags |= R300_TXO_MICRO_TILE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_TXO_MICRO_TILE_SQUARE; tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset); tmp = idx_value + ((u32)reloc->gpu_offset);
tmp |= tile_flags; tmp |= tile_flags;
ib[idx] = tmp; ib[idx] = tmp;
} }
...@@ -753,11 +753,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ...@@ -753,11 +753,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_COLOR_TILE_ENABLE; tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE; tile_flags |= R300_COLOR_MICROTILE_ENABLE;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
...@@ -838,11 +838,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ...@@ -838,11 +838,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r; return r;
} }
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE; tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED; tile_flags |= R300_DEPTHMICROTILE_TILED;
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16); tmp = idx_value & ~(0x7 << 16);
...@@ -1052,7 +1052,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ...@@ -1052,7 +1052,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case 0x4e0c: case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */ /* RB3D_COLOR_CHANNEL_MASK */
...@@ -1097,7 +1097,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ...@@ -1097,7 +1097,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->aa.robj = reloc->robj; track->aa.robj = reloc->robj;
track->aa.offset = idx_value; track->aa.offset = idx_value;
track->aa_dirty = true; track->aa_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); ib[idx] = idx_value + ((u32)reloc->gpu_offset);
break; break;
case R300_RB3D_AARESOLVE_PITCH: case R300_RB3D_AARESOLVE_PITCH:
track->aa.pitch = idx_value & 0x3FFE; track->aa.pitch = idx_value & 0x3FFE;
...@@ -1162,7 +1162,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p, ...@@ -1162,7 +1162,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
radeon_cs_dump_packet(p, pkt); radeon_cs_dump_packet(p, pkt);
return r; return r;
} }
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) { if (r) {
return r; return r;
......
This diff is collapsed.
...@@ -479,15 +479,6 @@ struct radeon_bo { ...@@ -479,15 +479,6 @@ struct radeon_bo {
}; };
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
struct radeon_bo_list {
struct ttm_validate_buffer tv;
struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned domain;
unsigned alt_domain;
u32 tiling_flags;
};
int radeon_gem_debugfs_init(struct radeon_device *rdev); int radeon_gem_debugfs_init(struct radeon_device *rdev);
/* sub-allocation manager, it has to be protected by another lock. /* sub-allocation manager, it has to be protected by another lock.
...@@ -987,9 +978,12 @@ void cayman_dma_fini(struct radeon_device *rdev); ...@@ -987,9 +978,12 @@ void cayman_dma_fini(struct radeon_device *rdev);
struct radeon_cs_reloc { struct radeon_cs_reloc {
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
struct radeon_bo *robj; struct radeon_bo *robj;
struct radeon_bo_list lobj; struct ttm_validate_buffer tv;
uint64_t gpu_offset;
unsigned domain;
unsigned alt_domain;
uint32_t tiling_flags;
uint32_t handle; uint32_t handle;
uint32_t flags;
}; };
struct radeon_cs_chunk { struct radeon_cs_chunk {
...@@ -1013,7 +1007,7 @@ struct radeon_cs_parser { ...@@ -1013,7 +1007,7 @@ struct radeon_cs_parser {
unsigned nrelocs; unsigned nrelocs;
struct radeon_cs_reloc *relocs; struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr; struct radeon_cs_reloc **relocs_ptr;
struct radeon_bo_list *vm_bos; struct radeon_cs_reloc *vm_bos;
struct list_head validated; struct list_head validated;
unsigned dma_reloc_idx; unsigned dma_reloc_idx;
/* indices of various chunks */ /* indices of various chunks */
...@@ -2803,9 +2797,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev); ...@@ -2803,9 +2797,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev); void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_vm *vm,
struct list_head *head); struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring); struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev, void radeon_vm_flush(struct radeon_device *rdev,
......
...@@ -125,7 +125,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) ...@@ -125,7 +125,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
} }
p->relocs_ptr[i] = &p->relocs[i]; p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
p->relocs[i].lobj.bo = p->relocs[i].robj;
/* The userspace buffer priorities are from 0 to 15. A higher /* The userspace buffer priorities are from 0 to 15. A higher
* number means the buffer is more important. * number means the buffer is more important.
...@@ -141,10 +140,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) ...@@ -141,10 +140,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (p->ring == R600_RING_TYPE_UVD_INDEX && if (p->ring == R600_RING_TYPE_UVD_INDEX &&
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
/* TODO: is this still needed for NI+ ? */ /* TODO: is this still needed for NI+ ? */
p->relocs[i].lobj.domain = p->relocs[i].domain =
RADEON_GEM_DOMAIN_VRAM; RADEON_GEM_DOMAIN_VRAM;
p->relocs[i].lobj.alt_domain = p->relocs[i].alt_domain =
RADEON_GEM_DOMAIN_VRAM; RADEON_GEM_DOMAIN_VRAM;
/* prioritize this over any other relocation */ /* prioritize this over any other relocation */
...@@ -153,16 +152,16 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) ...@@ -153,16 +152,16 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
uint32_t domain = r->write_domain ? uint32_t domain = r->write_domain ?
r->write_domain : r->read_domains; r->write_domain : r->read_domains;
p->relocs[i].lobj.domain = domain; p->relocs[i].domain = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM) if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT; domain |= RADEON_GEM_DOMAIN_GTT;
p->relocs[i].lobj.alt_domain = domain; p->relocs[i].alt_domain = domain;
} }
p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].handle = r->handle; p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].lobj.tv.head, radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority); priority);
} }
...@@ -356,11 +355,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) ...@@ -356,11 +355,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
static int cmp_size_smaller_first(void *priv, struct list_head *a, static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b) struct list_head *b)
{ {
struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head); struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head); struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
/* Sort A before B if A is smaller. */ /* Sort A before B if A is smaller. */
return (int)la->bo->tbo.num_pages - (int)lb->bo->tbo.num_pages; return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
} }
/** /**
...@@ -786,9 +785,9 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, ...@@ -786,9 +785,9 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
/* FIXME: we assume reloc size is 4 dwords */ /* FIXME: we assume reloc size is 4 dwords */
if (nomm) { if (nomm) {
*cs_reloc = p->relocs; *cs_reloc = p->relocs;
(*cs_reloc)->lobj.gpu_offset = (*cs_reloc)->gpu_offset =
(u64)relocs_chunk->kdata[idx + 3] << 32; (u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
} else } else
*cs_reloc = p->relocs_ptr[(idx / 4)]; *cs_reloc = p->relocs_ptr[(idx / 4)];
return 0; return 0;
......
...@@ -422,7 +422,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, ...@@ -422,7 +422,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
struct ww_acquire_ctx *ticket, struct ww_acquire_ctx *ticket,
struct list_head *head, int ring) struct list_head *head, int ring)
{ {
struct radeon_bo_list *lobj; struct radeon_cs_reloc *lobj;
struct radeon_bo *bo; struct radeon_bo *bo;
int r; int r;
u64 bytes_moved = 0, initial_bytes_moved; u64 bytes_moved = 0, initial_bytes_moved;
...@@ -434,7 +434,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, ...@@ -434,7 +434,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
} }
list_for_each_entry(lobj, head, tv.head) { list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo; bo = lobj->robj;
if (!bo->pin_count) { if (!bo->pin_count) {
u32 domain = lobj->domain; u32 domain = lobj->domain;
u32 current_domain = u32 current_domain =
......
...@@ -453,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, ...@@ -453,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
} }
reloc = p->relocs_ptr[(idx / 4)]; reloc = p->relocs_ptr[(idx / 4)];
start = reloc->lobj.gpu_offset; start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj); end = start + radeon_bo_size(reloc->robj);
start += offset; start += offset;
......
...@@ -461,7 +461,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) ...@@ -461,7 +461,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
return -EINVAL; return -EINVAL;
} }
offset += p->relocs_ptr[(idx / 4)]->lobj.gpu_offset; offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
p->ib.ptr[lo] = offset & 0xFFFFFFFF; p->ib.ptr[lo] = offset & 0xFFFFFFFF;
p->ib.ptr[hi] = offset >> 32; p->ib.ptr[hi] = offset >> 32;
......
...@@ -125,33 +125,39 @@ void radeon_vm_manager_fini(struct radeon_device *rdev) ...@@ -125,33 +125,39 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
* Add the page directory to the list of BOs to * Add the page directory to the list of BOs to
* validate for command submission (cayman+). * validate for command submission (cayman+).
*/ */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_vm *vm,
struct list_head *head) struct list_head *head)
{ {
struct radeon_bo_list *list; struct radeon_cs_reloc *list;
unsigned i, idx, size; unsigned i, idx, size;
size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_bo_list); size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
list = kmalloc(size, GFP_KERNEL); list = kmalloc(size, GFP_KERNEL);
if (!list) if (!list)
return NULL; return NULL;
/* add the vm page table to the list */ /* add the vm page table to the list */
list[0].bo = vm->page_directory; list[0].gobj = NULL;
list[0].robj = vm->page_directory;
list[0].domain = RADEON_GEM_DOMAIN_VRAM; list[0].domain = RADEON_GEM_DOMAIN_VRAM;
list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM; list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo; list[0].tv.bo = &vm->page_directory->tbo;
list[0].tiling_flags = 0;
list[0].handle = 0;
list_add(&list[0].tv.head, head); list_add(&list[0].tv.head, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo) if (!vm->page_tables[i].bo)
continue; continue;
list[idx].bo = vm->page_tables[i].bo; list[idx].gobj = NULL;
list[idx].robj = vm->page_tables[i].bo;
list[idx].domain = RADEON_GEM_DOMAIN_VRAM; list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM; list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].bo->tbo; list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tiling_flags = 0;
list[idx].handle = 0;
list_add(&list[idx++].tv.head, head); list_add(&list[idx++].tv.head, head);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment