Commit 71776b6d authored by Christian König, committed by Alex Deucher

drm/amdgpu: cleanup mtype mapping

Unify how we map the UAPI flags to the PTE hardware flags for a mapping.

Only the MTYPE is actually ASIC dependent; all other flags should be
copied over 1:1, and ASIC differences are handled later on.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1dd077bb
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

@@ -381,7 +381,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 	}
 
-	return amdgpu_gmc_get_pte_flags(adev, mapping_flags);
+	return amdgpu_gem_va_map_flags(adev, mapping_flags);
 }
 
 /* add_bo_to_vm - Add a BO to a VM
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -532,6 +532,34 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
+/**
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: GEM UAPI flags
+ *
+ * Returns the GEM UAPI flags mapped into hardware for the ASIC.
+ */
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
+{
+	uint64_t pte_flag = 0;
+
+	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+		pte_flag |= AMDGPU_PTE_EXECUTABLE;
+	if (flags & AMDGPU_VM_PAGE_READABLE)
+		pte_flag |= AMDGPU_PTE_READABLE;
+	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+		pte_flag |= AMDGPU_PTE_WRITEABLE;
+	if (flags & AMDGPU_VM_PAGE_PRT)
+		pte_flag |= AMDGPU_PTE_PRT;
+
+	if (adev->gmc.gmc_funcs->map_mtype)
+		pte_flag |= amdgpu_gmc_map_mtype(adev,
+						 flags & AMDGPU_VM_MTYPE_MASK);
+	return pte_flag;
+}
+
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {
@@ -629,7 +657,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 				     args->offset_in_bo, args->map_size,
 				     va_flags);
@@ -644,7 +672,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 				       args->map_size);
 		break;
 	case AMDGPU_VA_OP_REPLACE:
-		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 					     args->offset_in_bo, args->map_size,
 					     va_flags);
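
The net effect is that amdgpu_gem_va_map_flags() becomes the single place where UAPI flags turn into PTE flags, with the ASIC consulted only for the MTYPE. As a minimal, self-contained sketch of the resulting split (stand-in flag names and bit values for illustration, not the kernel's actual definitions):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in flag values; the real definitions live in
	 * include/uapi/drm/amdgpu_drm.h and amdgpu_vm.h. */
	#define VM_PAGE_READABLE	(1u << 1)
	#define VM_PAGE_WRITEABLE	(1u << 2)
	#define VM_MTYPE_MASK		(0xfu << 5)
	#define VM_MTYPE_UC		(3u << 5)

	#define PTE_READABLE		(1ULL << 5)
	#define PTE_WRITEABLE		(1ULL << 6)

	struct dev {
		/* per-ASIC hook; NULL on ASICs without selectable MTYPEs */
		uint64_t (*map_mtype)(uint32_t mtype);
	};

	/* ASIC-specific part: only the MTYPE is translated here */
	static uint64_t asic_map_mtype(uint32_t mtype)
	{
		return mtype == VM_MTYPE_UC ? (3ULL << 57) : 0; /* made-up encoding */
	}

	/* generic part: 1:1 flag copy, then the optional per-ASIC MTYPE hook */
	static uint64_t map_flags(const struct dev *d, uint32_t flags)
	{
		uint64_t pte = 0;

		if (flags & VM_PAGE_READABLE)
			pte |= PTE_READABLE;
		if (flags & VM_PAGE_WRITEABLE)
			pte |= PTE_WRITEABLE;
		if (d->map_mtype)
			pte |= d->map_mtype(flags & VM_MTYPE_MASK);
		return pte;
	}

	int main(void)
	{
		const struct dev d = { .map_mtype = asic_map_mtype };

		printf("0x%llx\n", (unsigned long long)
		       map_flags(&d, VM_PAGE_READABLE | VM_MTYPE_UC));
		return 0;
	}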
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h

@@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp);
 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *filp);
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp);
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h

@@ -99,9 +99,8 @@ struct amdgpu_gmc_funcs {
 			      unsigned pasid);
 	/* enable/disable PRT support */
 	void (*set_prt)(struct amdgpu_device *adev, bool enable);
-	/* set pte flags based per asic */
-	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
-				     uint32_t flags);
+	/* map mtype to hardware flags */
+	uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
 	/* get the pde for a given mc addr */
 	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
 			   u64 *dst, u64 *flags);
@@ -184,8 +183,8 @@ struct amdgpu_gmc {
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 
 /**
  * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
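
Note that only gmc_v9_0 and gmc_v10_0 install the new hook; the v6/v7/v8 backends below simply drop get_vm_pte_flags and provide no map_mtype at all. That is why the generic helper tests the function pointer before going through the macro, a pattern worth calling out:

	/* the macro dereferences the hook unconditionally ... */
	#define amdgpu_gmc_map_mtype(adev, flags) \
		(adev)->gmc.gmc_funcs->map_mtype((adev),(flags))

	/* ... so callers must guard against backends that leave it NULL */
	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);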
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -1571,8 +1571,10 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
 		flags &= ~AMDGPU_PTE_WRITEABLE;
 
-	flags &= ~AMDGPU_PTE_EXECUTABLE;
-	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+	if (adev->asic_type >= CHIP_TONGA) {
+		flags &= ~AMDGPU_PTE_EXECUTABLE;
+		flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+	}
 
 	if (adev->asic_type >= CHIP_NAVI10) {
 		flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
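
The guard added here is where "ASIC differences are handled later on": with the UAPI execute bit now copied 1:1 for every ASIC, amdgpu_vm_bo_split_mapping() has to decide per chip whether the bit may reach the PTE (CHIP_TONGA marks the gmc_v8_0 generation, the earliest backend in this patch that honored AMDGPU_VM_PAGE_EXECUTABLE). As a worked example of the clear-then-splice idiom, with illustrative bit values:

	uint64_t flags = 0x71;		/* e.g. valid|executable|readable|writeable */
	uint64_t mapping_flags = 0x01;	/* this mapping does not request execute */
	uint64_t PTE_EXECUTABLE = 0x10;	/* illustrative bit position */

	flags &= ~PTE_EXECUTABLE;		 /* drop whatever was there: 0x61 */
	flags |= mapping_flags & PTE_EXECUTABLE; /* adopt the mapping's choice: still 0x61 */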
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c

@@ -397,43 +397,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
  * 1 system
  * 0 valid
  */
-static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
-					   uint32_t flags)
+static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
 {
-	uint64_t pte_flag = 0;
-
-	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
-		pte_flag |= AMDGPU_PTE_EXECUTABLE;
-	if (flags & AMDGPU_VM_PAGE_READABLE)
-		pte_flag |= AMDGPU_PTE_READABLE;
-	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
-		pte_flag |= AMDGPU_PTE_WRITEABLE;
-
-	switch (flags & AMDGPU_VM_MTYPE_MASK) {
+	switch (flags) {
 	case AMDGPU_VM_MTYPE_DEFAULT:
-		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
-		break;
+		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 	case AMDGPU_VM_MTYPE_NC:
-		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
-		break;
+		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 	case AMDGPU_VM_MTYPE_WC:
-		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
-		break;
+		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
 	case AMDGPU_VM_MTYPE_CC:
-		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
-		break;
+		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
 	case AMDGPU_VM_MTYPE_UC:
-		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
-		break;
+		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 	default:
-		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
-		break;
+		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 	}
-
-	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
-
-	return pte_flag;
 }
 
 static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -464,7 +444,7 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
 	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
-	.get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
+	.map_mtype = gmc_v10_0_map_mtype,
 	.get_vm_pde = gmc_v10_0_get_vm_pde
 };
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -386,21 +386,6 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	return pd_addr;
 }
 
-static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
-					  uint32_t flags)
-{
-	uint64_t pte_flag = 0;
-
-	if (flags & AMDGPU_VM_PAGE_READABLE)
-		pte_flag |= AMDGPU_PTE_READABLE;
-	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
-		pte_flag |= AMDGPU_PTE_WRITEABLE;
-	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
-
-	return pte_flag;
-}
-
 static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
 				uint64_t *addr, uint64_t *flags)
 {
@@ -1153,7 +1138,6 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
 	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
-	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -463,21 +463,6 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
 }
 
-static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
-					  uint32_t flags)
-{
-	uint64_t pte_flag = 0;
-
-	if (flags & AMDGPU_VM_PAGE_READABLE)
-		pte_flag |= AMDGPU_PTE_READABLE;
-	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
-		pte_flag |= AMDGPU_PTE_WRITEABLE;
-	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
-
-	return pte_flag;
-}
-
 static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
 				uint64_t *addr, uint64_t *flags)
 {
@@ -1343,7 +1328,6 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
 	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
 	.set_prt = gmc_v7_0_set_prt,
-	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v7_0_get_vm_pde
 };
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -686,23 +686,6 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
  * 0 valid
  */
-static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
-					  uint32_t flags)
-{
-	uint64_t pte_flag = 0;
-
-	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
-		pte_flag |= AMDGPU_PTE_EXECUTABLE;
-	if (flags & AMDGPU_VM_PAGE_READABLE)
-		pte_flag |= AMDGPU_PTE_READABLE;
-	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
-		pte_flag |= AMDGPU_PTE_WRITEABLE;
-	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
-
-	return pte_flag;
-}
-
 static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
 				uint64_t *addr, uint64_t *flags)
 {
@@ -1711,7 +1694,6 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
 	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
 	.set_prt = gmc_v8_0_set_prt,
-	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v8_0_get_vm_pde
 };
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -608,47 +608,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
  * 0 valid
  */
-static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
-					  uint32_t flags)
+static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
 {
-	uint64_t pte_flag = 0;
-
-	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
-		pte_flag |= AMDGPU_PTE_EXECUTABLE;
-	if (flags & AMDGPU_VM_PAGE_READABLE)
-		pte_flag |= AMDGPU_PTE_READABLE;
-	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
-		pte_flag |= AMDGPU_PTE_WRITEABLE;
-
-	switch (flags & AMDGPU_VM_MTYPE_MASK) {
+	switch (flags) {
 	case AMDGPU_VM_MTYPE_DEFAULT:
-		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
-		break;
+		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
 	case AMDGPU_VM_MTYPE_NC:
-		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
-		break;
+		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
 	case AMDGPU_VM_MTYPE_WC:
-		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
-		break;
+		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
 	case AMDGPU_VM_MTYPE_RW:
-		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
-		break;
+		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
 	case AMDGPU_VM_MTYPE_CC:
-		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
-		break;
+		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
 	case AMDGPU_VM_MTYPE_UC:
-		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
-		break;
+		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
 	default:
-		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
-		break;
+		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
 	}
-
-	if (flags & AMDGPU_VM_PAGE_PRT)
-		pte_flag |= AMDGPU_PTE_PRT;
-
-	return pte_flag;
 }
 
 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -679,7 +657,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
-	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
+	.map_mtype = gmc_v9_0_map_mtype,
 	.get_vm_pde = gmc_v9_0_get_vm_pde
 };
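
After this cleanup the per-ASIC surface is pleasantly small. A hypothetical future backend would only need an MTYPE switch and one line in its gmc_funcs table; everything else comes from the generic helper. A sketch, where gmc_vNN_0 and AMDGPU_PTE_MTYPE_NEW are made-up names for illustration:

	/* hypothetical backend: names are placeholders, not real kernel symbols */
	static uint64_t gmc_vNN_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
	{
		switch (flags) {
		case AMDGPU_VM_MTYPE_WC:
			return AMDGPU_PTE_MTYPE_NEW(MTYPE_WC);
		case AMDGPU_VM_MTYPE_UC:
			return AMDGPU_PTE_MTYPE_NEW(MTYPE_UC);
		default:
			return AMDGPU_PTE_MTYPE_NEW(MTYPE_NC);
		}
	}

	static const struct amdgpu_gmc_funcs gmc_vNN_0_gmc_funcs = {
		/* ...flush/emit hooks elided... */
		.map_mtype = gmc_vNN_0_map_mtype,
		.get_vm_pde = gmc_vNN_0_get_vm_pde
	};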