nexedi / linux · Commits

Commit 6a92ef08
authored Aug 11, 2018 by David S. Miller

Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
parents 9a95d9c6 ec0c9671

Showing 19 changed files with 92 additions and 71 deletions

MAINTAINERS                                     +1   -1
arch/parisc/Kconfig                             +0   -1
arch/s390/Kconfig                               +0   -1
drivers/block/zram/zram_drv.c                   +14  -1
drivers/i2c/busses/i2c-xlp9xx.c                 +28  -13
drivers/net/ethernet/ti/cpsw.c                  +11  -14
drivers/net/ethernet/ti/cpsw_ale.c              +1   -1
drivers/net/xen-netfront.c                      +4   -4
kernel/bpf/cpumap.c                             +9   -6
kernel/bpf/devmap.c                             +9   -5
kernel/bpf/sockmap.c                            +6   -3
lib/Kconfig.ubsan                               +0   -11
mm/memory.c                                     +3   -0
samples/bpf/xdp_redirect_cpu_kern.c             +1   -1
samples/bpf/xdp_redirect_cpu_user.c             +2   -2
scripts/Makefile.ubsan                          +0   -4
tools/lib/bpf/btf.c                             +1   -1
tools/lib/bpf/btf.h                             +1   -1
tools/testing/selftests/bpf/test_sockmap.c      +1   -1

MAINTAINERS
@@ -5930,7 +5930,7 @@ F: Documentation/dev-tools/gcov.rst
 GDB KERNEL DEBUGGING HELPER SCRIPTS
 M:	Jan Kiszka <jan.kiszka@siemens.com>
-M:	Kieran Bingham <kieran@bingham.xyz>
+M:	Kieran Bingham <kbingham@kernel.org>
 S:	Supported
 F:	scripts/gdb/

arch/parisc/Kconfig
@@ -11,7 +11,6 @@ config PARISC
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
-	select ARCH_WANTS_UBSAN_NO_NULL
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select RTC_CLASS
 	select RTC_DRV_GENERIC

arch/s390/Kconfig
@@ -106,7 +106,6 @@ config S390
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
-	select ARCH_WANTS_UBSAN_NO_NULL
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS2

drivers/block/zram/zram_drv.c
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
 	zram->backing_dev = NULL;
 	zram->old_block_size = 0;
 	zram->bdev = NULL;
+	zram->disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
 	kvfree(zram->bitmap);
 	zram->bitmap = NULL;
 }
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
 	zram->backing_dev = backing_dev;
 	zram->bitmap = bitmap;
 	zram->nr_pages = nr_pages;
+	/*
+	 * With writeback feature, zram does asynchronous IO so it's no longer
+	 * synchronous device so let's remove synchronous io flag. Othewise,
+	 * upper layer(e.g., swap) could wait IO completion rather than
+	 * (submit and return), which will cause system sluggish.
+	 * Furthermore, when the IO function returns(e.g., swap_readpage),
+	 * upper layer expects IO was done so it could deallocate the page
+	 * freely but in fact, IO is going on so finally could cause
+	 * use-after-free when the IO is really done.
+	 */
+	zram->disk->queue->backing_dev_info->capabilities &=
+		~BDI_CAP_SYNCHRONOUS_IO;
 	up_write(&zram->init_lock);
 
 	pr_info("setup backing device %s\n", file_name);

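Illustration (not part of this commit): the zram hunks above toggle the BDI_CAP_SYNCHRONOUS_IO capability bit so that upper layers stop treating the device as synchronous once a writeback backend is attached. A minimal, self-contained userspace sketch of that set/clear bitmask pattern; the flag value and struct here are invented for the example.

    #include <stdio.h>

    #define CAP_SYNCHRONOUS_IO (1u << 0)   /* stand-in for BDI_CAP_SYNCHRONOUS_IO */

    struct fake_bdi { unsigned int capabilities; };

    /* Device gains a writeback backend: it is no longer synchronous. */
    static void attach_backing_dev(struct fake_bdi *bdi)
    {
            bdi->capabilities &= ~CAP_SYNCHRONOUS_IO;
    }

    /* Backend removed: advertise synchronous completion again. */
    static void reset_backing_dev(struct fake_bdi *bdi)
    {
            bdi->capabilities |= CAP_SYNCHRONOUS_IO;
    }

    int main(void)
    {
            struct fake_bdi bdi = { .capabilities = CAP_SYNCHRONOUS_IO };

            attach_backing_dev(&bdi);
            printf("sync after attach: %d\n", !!(bdi.capabilities & CAP_SYNCHRONOUS_IO));
            reset_backing_dev(&bdi);
            printf("sync after reset:  %d\n", !!(bdi.capabilities & CAP_SYNCHRONOUS_IO));
            return 0;
    }
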
drivers/i2c/busses/i2c-xlp9xx.c
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
 	if (priv->len_recv) {
 		/* read length byte */
 		rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+		/*
+		 * We expect at least 2 interrupts for I2C_M_RECV_LEN
+		 * transactions. The length is updated during the first
+		 * interrupt, and the buffer contents are only copied
+		 * during subsequent interrupts. If in case the interrupts
+		 * get merged we would complete the transaction without
+		 * copying out the bytes from RX fifo. To avoid this now we
+		 * drain the fifo as and when data is available.
+		 * We drained the rlen byte already, decrement total length
+		 * by one.
+		 */
+		len--;
 		if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
 			rlen = 0;	/*abort transfer */
 			priv->msg_buf_remaining = 0;
 			priv->msg_len = 0;
-		} else {
-			*buf++ = rlen;
-			if (priv->client_pec)
-				++rlen; /* account for error check byte */
-			/* update remaining bytes and message length */
-			priv->msg_buf_remaining = rlen;
-			priv->msg_len = rlen + 1;
+			xlp9xx_i2c_update_rlen(priv);
+			return;
 		}
+
+		*buf++ = rlen;
+		if (priv->client_pec)
+			++rlen; /* account for error check byte */
+		/* update remaining bytes and message length */
+		priv->msg_buf_remaining = rlen;
+		priv->msg_len = rlen + 1;
 		xlp9xx_i2c_update_rlen(priv);
 		priv->len_recv = false;
-	} else {
-		len = min(priv->msg_buf_remaining, len);
-		for (i = 0; i < len; i++, buf++)
-			*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
-		priv->msg_buf_remaining -= len;
 	}
+
+	len = min(priv->msg_buf_remaining, len);
+	for (i = 0; i < len; i++, buf++)
+		*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+	priv->msg_buf_remaining -= len;
 	priv->msg_buf = buf;
 
 	if (priv->msg_buf_remaining)

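Illustration (not part of this commit): the hunk above keeps the existing sanity check on the SMBus block-read length byte, which must lie between 1 and I2C_SMBUS_BLOCK_MAX, and aborts the transfer otherwise before draining the FIFO. A minimal userspace sketch of that validate-then-copy pattern; the 32-byte limit comes from the SMBus specification, while the helper itself is hypothetical.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define I2C_SMBUS_BLOCK_MAX 32	/* SMBus allows at most 32 data bytes */

    /*
     * Copy a block read out of a raw FIFO dump: fifo[0] is the length byte,
     * fifo[1..] is the payload. Returns the number of payload bytes copied,
     * or 0 if the length byte is out of range and the transfer must abort.
     */
    size_t copy_smbus_block(const uint8_t *fifo, uint8_t *out, size_t out_cap)
    {
            uint8_t rlen = fifo[0];

            if (rlen == 0 || rlen > I2C_SMBUS_BLOCK_MAX || rlen > out_cap)
                    return 0;	/* abort transfer */

            memcpy(out, fifo + 1, rlen);
            return rlen;
    }
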
drivers/net/ethernet/ti/cpsw.c
@@ -2358,14 +2358,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 		int i;
 
 		for (i = 0; i < cpsw->data.slaves; i++) {
-			if (vid == cpsw->slaves[i].port_vlan)
-				return -EINVAL;
+			if (vid == cpsw->slaves[i].port_vlan) {
+				ret = -EINVAL;
+				goto err;
+			}
 		}
 	}
 
 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 	ret = cpsw_add_vlan_ale_entry(priv, vid);
+err:
 	pm_runtime_put(cpsw->dev);
 	return ret;
 }
@@ -2391,22 +2393,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 		for (i = 0; i < cpsw->data.slaves; i++) {
 			if (vid == cpsw->slaves[i].port_vlan)
-				return -EINVAL;
+				goto err;
 		}
 	}
 
 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
 	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-	if (ret != 0)
-		return ret;
-
-	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
-				 HOST_PORT_NUM, ALE_VLAN, vid);
-	if (ret != 0)
-		return ret;
-
-	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
-				 0, ALE_VLAN, vid);
+	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+				  HOST_PORT_NUM, ALE_VLAN, vid);
+	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+				  0, ALE_VLAN, vid);
+err:
 	pm_runtime_put(cpsw->dev);
 	return ret;
 }

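Illustration (not part of this commit): in the second hunk the three ALE delete results are OR-combined into ret instead of returning early, so every entry is removed and pm_runtime_put() is always reached through the single err: exit. A small, self-contained sketch of that accumulate-errors, single-exit cleanup pattern; the step functions are hypothetical stand-ins that return 0 or a negative errno, and OR-ing them only preserves "something failed", not a specific error code.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical cleanup steps: each returns 0 on success or -errno. */
    static int del_vlan(int vid)  { return vid == 42 ? -ENOENT : 0; }
    static int del_ucast(int vid) { (void)vid; return 0; }
    static int del_mcast(int vid) { (void)vid; return 0; }

    static int kill_vid(int vid)
    {
            int ret;

            if (vid < 0) {
                    ret = -EINVAL;
                    goto err;	/* still fall through to the common exit */
            }

            /*
             * OR the results together instead of returning after the first
             * failure: all three deletions are attempted, and the caller
             * still sees a nonzero value if any of them failed.
             */
            ret = del_vlan(vid);
            ret |= del_ucast(vid);
            ret |= del_mcast(vid);
    err:
            /* Common exit: the reference released here is never leaked. */
            printf("runtime PM reference released\n");
            return ret;
    }

    int main(void)
    {
            printf("kill_vid(1)  -> %d\n", kill_vid(1));
            printf("kill_vid(42) -> %d\n", kill_vid(42));
            return 0;
    }
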
drivers/net/ethernet/ti/cpsw_ale.c
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
 	if (idx < 0)
-		return -EINVAL;
+		return -ENOENT;
 
 	cpsw_ale_read(ale, idx, ale_entry);

drivers/net/xen-netfront.c
@@ -895,7 +895,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 				  struct sk_buff *skb,
 				  struct sk_buff_head *list)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -904,15 +903,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
 			BUG_ON(pull_to <= skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				skb_frag_page(nfrag),
 				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;

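Illustration (not part of this commit): the change above stops caching skb_shinfo(skb) in a local variable, because __pskb_pull_tail() may reallocate the skb's data and leave the cached pointer dangling; the shared info is re-read through skb_shinfo(skb) after each call that might move it. A minimal userspace analogue of the hazard, using realloc() as the reallocating call; the buffer type is invented for the example.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
            char  *data;
            size_t len;
    };

    /* May move buf->data, like __pskb_pull_tail() may move skb data. */
    static void buf_grow(struct buf *b, size_t extra)
    {
            char *p = realloc(b->data, b->len + extra);

            if (!p)
                    exit(1);
            memset(p + b->len, 0, extra);
            b->data = p;
            b->len += extra;
    }

    int main(void)
    {
            struct buf b = { .data = calloc(1, 8), .len = 8 };

            /*
             * BAD: caching a pointer into the buffer across a call that may
             * realloc it leaves the cached pointer dangling:
             *   char *tail = b.data + b.len; buf_grow(&b, 64); *tail = 1;
             *
             * GOOD: re-derive the pointer after every potentially moving call.
             */
            buf_grow(&b, 64);
            char *tail = b.data + b.len - 1;
            *tail = 1;

            printf("last byte: %d\n", b.data[b.len - 1]);
            free(b.data);
            return 0;
    }
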
kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq);
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
 		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
 		/* No concurrent bq_enqueue can run at this point */
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, false);
 	}
 	free_percpu(rcpu->bulkq);
 	/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq)
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
 	unsigned int processed = 0, drops = 0;
 	const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 		err = __ptr_ring_produce(q, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame_rx_napi(xdpf);
+			if (likely(in_napi_ctx))
+				xdp_return_frame_rx_napi(xdpf);
+			else
+				xdp_return_frame(xdpf);
 		}
 		processed++;
 	}
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 	/* Notice, xdp_buff/page MUST be queued here, long enough for
 	 * driver to code invoking us to finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 		/* Flush all frames in bulkq to real queue */
 		bq = this_cpu_ptr(rcpu->bulkq);
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 		/* If already running, costs spin_lock_irqsave + smb_mb */
 		wake_up_process(rcpu->kthread);

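Illustration (not part of this commit): both this file and kernel/bpf/devmap.c below thread a new in_napi_ctx flag from each caller of the flush helper down to where a dropped frame is returned, because the fast xdp_return_frame_rx_napi() variant is only safe when the flush runs in NAPI (driver RX) context; the map-teardown paths pass false and take the plain return path. A stripped-down, self-contained sketch of threading such a caller-context flag; all names here are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    /* Two ways to return a frame; the fast one is only valid in "NAPI" context. */
    static void return_frame_fast(int frame) { printf("fast return of frame %d\n", frame); }
    static void return_frame_slow(int frame) { printf("slow return of frame %d\n", frame); }

    /* The flush helper no longer guesses its context; the caller states it. */
    static void flush_queue(const int *frames, int n, bool in_napi_ctx)
    {
            for (int i = 0; i < n; i++) {
                    if (in_napi_ctx)
                            return_frame_fast(frames[i]);
                    else
                            return_frame_slow(frames[i]);
            }
    }

    int main(void)
    {
            int frames[] = { 1, 2, 3 };

            flush_queue(frames, 3, true);	/* hot path: called from the RX loop */
            flush_queue(frames, 3, false);	/* teardown path: no NAPI protection */
            return 0;
    }
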
kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags)
+		       struct xdp_bulk_queue *bq, u32 flags,
+		       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
 	}
 }

kernel/bpf/sockmap.c
@@ -1045,12 +1045,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
 	while (msg_data_left(msg)) {
-		struct sk_msg_buff *m;
+		struct sk_msg_buff *m = NULL;
 		bool enospc = false;
 		int copy;
 
 		if (sk->sk_err) {
-			err = sk->sk_err;
+			err = -sk->sk_err;
 			goto out_err;
 		}
@@ -1113,8 +1113,11 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
 		err = sk_stream_wait_memory(sk, &timeo);
-		if (err)
+		if (err) {
+			if (m && m != psock->cork)
+				free_start_sg(sk, m);
 			goto out_err;
+		}
 	}
 out_err:
 	if (err < 0)

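Illustration (not part of this commit): the first hunk fixes the sign of the reported error; sk->sk_err stores a positive errno value, while kernel-internal functions report failure as a negative errno, so the value must be negated before being returned. A tiny userspace sketch of that convention, with an invented pending_err field standing in for sk_err.

    #include <errno.h>
    #include <stdio.h>

    struct fake_sock {
            int pending_err;	/* stored positive, like sk->sk_err */
    };

    /* Returns 0 on success or a negative errno, the kernel-internal convention. */
    static int do_send(struct fake_sock *s)
    {
            if (s->pending_err)
                    return -s->pending_err;	/* negate: ECONNRESET -> -ECONNRESET */
            return 0;
    }

    int main(void)
    {
            struct fake_sock s = { .pending_err = ECONNRESET };

            printf("do_send -> %d (expect %d)\n", do_send(&s), -ECONNRESET);
            return 0;
    }
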
lib/Kconfig.ubsan
 config ARCH_HAS_UBSAN_SANITIZE_ALL
 	bool
 
-config ARCH_WANTS_UBSAN_NO_NULL
-	def_bool n
-
 config UBSAN
 	bool "Undefined behaviour sanity checker"
 	help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
 	  Enabling this option on architectures that support unaligned
 	  accesses may produce a lot of false positives.
 
-config UBSAN_NULL
-	bool "Enable checking of null pointers"
-	depends on UBSAN
-	default y if !ARCH_WANTS_UBSAN_NO_NULL
-	help
-	  This option enables detection of memory accesses via a
-	  null pointer.
-
 config TEST_UBSAN
 	tristate "Module for testing for undefined behavior detection"
 	depends on m && UBSAN

mm/memory.c
@@ -4395,6 +4395,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+	if (!maddr)
+		return -ENOMEM;
+
 	if (write)
 		memcpy_toio(maddr + offset, buf, len);
 	else

samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {

samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
 	 * procedure.
 	 */
 	create_cpu_entry(1, 1024, 0, false);
-	create_cpu_entry(1, 128, 0, false);
+	create_cpu_entry(1, 8, 0, false);
 	create_cpu_entry(1, 16000, 0, false);
 }

scripts/Makefile.ubsan
@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
 endif
 
-ifdef CONFIG_UBSAN_NULL
-      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
-endif
-
 # -fsanitize=* options makes GCC less smart than usual and
 # increase number of 'maybe-uninitialized false-positives
 CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)

tools/lib/bpf/btf.c
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>

tools/lib/bpf/btf.h
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H

tools/testing/selftests/bpf/test_sockmap.c
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
 	while (s->bytes_recvd < total_bytes) {
 		if (txmsg_cork) {
 			timeout.tv_sec = 0;
-			timeout.tv_usec = 1000;
+			timeout.tv_usec = 300000;
 		} else {
 			timeout.tv_sec = 1;
 			timeout.tv_usec = 0;