Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflict resolution of af_smc.c from Stephen Rothwell.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -78,6 +78,8 @@ ForEachMacros:
   - 'ata_qc_for_each_with_internal'
   - 'ax25_for_each'
   - 'ax25_uid_for_each'
+  - '__bio_for_each_bvec'
+  - 'bio_for_each_bvec'
   - 'bio_for_each_integrity_vec'
   - '__bio_for_each_segment'
   - 'bio_for_each_segment'
@@ -118,10 +120,12 @@ ForEachMacros:
   - 'drm_for_each_legacy_plane'
   - 'drm_for_each_plane'
   - 'drm_for_each_plane_mask'
+  - 'drm_for_each_privobj'
   - 'drm_mm_for_each_hole'
   - 'drm_mm_for_each_node'
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
+  - 'flow_action_for_each'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -158,6 +162,9 @@ ForEachMacros:
   - 'for_each_dss_dev'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
+  - 'for_each_element'
+  - 'for_each_element_extid'
+  - 'for_each_element_id'
   - 'for_each_endpoint_of_node'
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
@@ -195,6 +202,7 @@ ForEachMacros:
   - 'for_each_net_rcu'
   - 'for_each_new_connector_in_state'
   - 'for_each_new_crtc_in_state'
+  - 'for_each_new_mst_mgr_in_state'
   - 'for_each_new_plane_in_state'
   - 'for_each_new_private_obj_in_state'
   - 'for_each_node'
@@ -210,8 +218,10 @@ ForEachMacros:
   - 'for_each_of_pci_range'
   - 'for_each_old_connector_in_state'
   - 'for_each_old_crtc_in_state'
+  - 'for_each_old_mst_mgr_in_state'
   - 'for_each_oldnew_connector_in_state'
   - 'for_each_oldnew_crtc_in_state'
+  - 'for_each_oldnew_mst_mgr_in_state'
   - 'for_each_oldnew_plane_in_state'
   - 'for_each_oldnew_plane_in_state_reverse'
   - 'for_each_oldnew_private_obj_in_state'
@@ -243,6 +253,9 @@ ForEachMacros:
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
   - 'for_each_sibling_event'
+  - 'for_each_subelement'
+  - 'for_each_subelement_extid'
+  - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_zone'
@@ -252,6 +265,8 @@ ForEachMacros:
   - 'fwnode_for_each_child_node'
   - 'fwnode_graph_for_each_endpoint'
   - 'gadget_for_each_ep'
+  - 'genradix_for_each'
+  - 'genradix_for_each_from'
   - 'hash_for_each'
   - 'hash_for_each_possible'
   - 'hash_for_each_possible_rcu'
@@ -293,7 +308,11 @@ ForEachMacros:
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
+  - 'klp_for_each_func_safe'
+  - 'klp_for_each_func_static'
   - 'klp_for_each_object'
+  - 'klp_for_each_object_safe'
+  - 'klp_for_each_object_static'
   - 'kvm_for_each_memslot'
   - 'kvm_for_each_vcpu'
   - 'list_for_each'
@@ -324,6 +343,8 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
+  - 'mp_bvec_for_each_page'
+  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -375,6 +396,7 @@ ForEachMacros:
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_from'
   - '__rq_for_each_bio'
+  - 'rq_for_each_bvec'
   - 'rq_for_each_segment'
   - 'scsi_for_each_prot_sg'
   - 'scsi_for_each_sg'
@@ -410,6 +432,8 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_src_buf_safe'
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
+  - 'xa_for_each_marked'
+  - 'xa_for_each_start'
   - 'xas_for_each'
   - 'xas_for_each_conflict'
   - 'xas_for_each_marked'
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 
  (*) Check call still alive.
 
-	u32 rxrpc_kernel_check_life(struct socket *sock,
-				    struct rxrpc_call *call);
+	bool rxrpc_kernel_check_life(struct socket *sock,
+				     struct rxrpc_call *call,
+				     u32 *_life);
 	void rxrpc_kernel_probe_life(struct socket *sock,
 				     struct rxrpc_call *call);
 
-     The first function returns a number that is updated when ACKs are received
-     from the peer (notably including PING RESPONSE ACKs which we can elicit by
-     sending PING ACKs to see if the call still exists on the server).  The
-     caller should compare the numbers of two calls to see if the call is still
-     alive after waiting for a suitable interval.
+     The first function passes back in *_life a number that is updated when
+     ACKs are received from the peer (notably including PING RESPONSE ACKs
+     which we can elicit by sending PING ACKs to see if the call still exists
+     on the server).  The caller should compare the numbers of two calls to see
+     if the call is still alive after waiting for a suitable interval.  It also
+     returns true as long as the call hasn't yet reached the completed state.
 
      This allows the caller to work out if the server is still contactable and
     if the call is still alive on the server while waiting for the server to
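For orientation, a minimal caller-side sketch of the revised rxrpc_kernel_check_life() interface documented above (kernel C; the one-second wait and the pr_warn are illustrative assumptions, not part of the patch):

	/* Sketch: decide whether a call is still alive after an interval. */
	u32 life, last_life;
	bool alive;

	alive = rxrpc_kernel_check_life(sock, call, &last_life);
	if (alive) {
		rxrpc_kernel_probe_life(sock, call);	/* solicit an ACK */
		msleep(1000);				/* illustrative wait */
		alive = rxrpc_kernel_check_life(sock, call, &life);
		if (alive && life == last_life)
			pr_warn("call may be dead on the server\n");
	}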
MAINTAINERS (17 changes)
@@ -10139,7 +10139,7 @@ F: drivers/spi/spi-at91-usart.c
 F:	Documentation/devicetree/bindings/mfd/atmel-usart.txt
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M:	Woojung Huh <Woojung.Huh@microchip.com>
+M:	Woojung Huh <woojung.huh@microchip.com>
 M:	Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -16503,7 +16503,7 @@ F: drivers/char/virtio_console.c
 F:	include/linux/virtio_console.h
 F:	include/uapi/linux/virtio_console.h
 
-VIRTIO CORE, NET AND BLOCK DRIVERS
+VIRTIO CORE AND NET DRIVERS
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 M:	Jason Wang <jasowang@redhat.com>
 L:	virtualization@lists.linux-foundation.org
@@ -16518,6 +16518,19 @@ F: include/uapi/linux/virtio_*.h
 F:	drivers/crypto/virtio/
 F:	mm/balloon_compaction.c
 
+VIRTIO BLOCK AND SCSI DRIVERS
+M:	"Michael S. Tsirkin" <mst@redhat.com>
+M:	Jason Wang <jasowang@redhat.com>
+R:	Paolo Bonzini <pbonzini@redhat.com>
+R:	Stefan Hajnoczi <stefanha@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/block/virtio_blk.c
+F:	drivers/scsi/virtio_scsi.c
+F:	include/uapi/linux/virtio_blk.h
+F:	include/uapi/linux/virtio_scsi.h
+F:	drivers/vhost/scsi.c
+
 VIRTIO CRYPTO DRIVER
 M:	Gonglei <arei.gonglei@huawei.com>
 L:	virtualization@lists.linux-foundation.org
Makefile (2 changes)
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -30,8 +30,8 @@ do { \
 " prfm pstl1strm, %2\n" \
 "1: ldxr %w1, %2\n" \
 	insn "\n" \
-"2: stlxr %w3, %w0, %2\n" \
-" cbnz %w3, 1b\n" \
+"2: stlxr %w0, %w3, %2\n" \
+" cbnz %w0, 1b\n" \
 " dmb ish\n" \
 "3:\n" \
 " .pushsection .fixup,\"ax\"\n" \
@@ -50,30 +50,30 @@ do { \
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 {
-	int oldval = 0, ret, tmp;
+	int oldval, ret, tmp;
 	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
 
 	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov %w0, %w4",
+		__futex_atomic_op("mov %w3, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add %w0, %w1, %w4",
+		__futex_atomic_op("add %w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr %w0, %w1, %w4",
+		__futex_atomic_op("orr %w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and %w0, %w1, %w4",
+		__futex_atomic_op("and %w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor %w0, %w1, %w4",
+		__futex_atomic_op("eor %w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	default:
@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
 struct plt_entry get_plt_entry(u64 dst, void *pc);
 bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
 
+static inline bool plt_entry_is_initialized(const struct plt_entry *e)
+{
+	return e->adrp || e->add || e->br;
+}
+
 #endif /* __ASM_MODULE_H */
@@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
 		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
 				       &trampoline)) {
-			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-					       &(struct plt_entry){})) {
+			if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}
@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	int skip;
+	int skip = 0;
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
+	if (regs) {
+		if (user_mode(regs))
+			return;
+		skip = 1;
+	}
+
 	if (!tsk)
 		tsk = current;
 
@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	frame.graph = 0;
 #endif
 
-	skip = !!regs;
 	printk("Call trace:\n");
 	do {
 		/* skip until specified stack frame */
@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
 		return ret;
 
 	print_modules();
-	__show_regs(regs);
 	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
 		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
 		 end_of_stack(tsk));
+	show_regs(regs);
 
-	if (!user_mode(regs)) {
-		dump_backtrace(regs, tsk);
+	if (!user_mode(regs))
 		dump_instr(KERN_EMERG, regs);
-	}
 
 	return ret;
 }
@@ -1,6 +1,10 @@
 # require CONFIG_CPU_MIPS32_R2=y
 
 CONFIG_LEGACY_BOARD_OCELOT=y
+CONFIG_FIT_IMAGE_FDT_OCELOT=y
+
+CONFIG_BRIDGE=y
+CONFIG_GENERIC_PHY=y
 
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
@@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 
 CONFIG_NETDEVICES=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_DSA=y
 CONFIG_MSCC_OCELOT_SWITCH=y
 CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
 CONFIG_MDIO_MSCC_MIIM=y
@@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y
 CONFIG_SPI_DW_MMIO=y
 CONFIG_SPI_SPIDEV=y
 
+CONFIG_PINCTRL_OCELOT=y
+
 CONFIG_GPIO_SYSFS=y
 
 CONFIG_POWER_RESET=y
@@ -33,6 +33,7 @@
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <linux/uaccess.h>
+#include <asm/irq_regs.h>
 
 static struct hard_trap_info {
 	unsigned char tt;	/* Trap type code for MIPS R3xxx and R4xxx */
@@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored)
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
 
-	kgdb_nmicallback(raw_smp_processor_id(), NULL);
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
 
 	set_fs(old_fs);
 }
@@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d)
 {
 	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
 	struct bridge_controller *bc;
-	int pin = hd->pin;
 
 	if (!hd)
 		return;
@@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d)
 	disable_hub_irq(d);
 
 	bc = hd->bc;
-	bridge_clr(bc, b_int_enable, (1 << pin));
+	bridge_clr(bc, b_int_enable, (1 << hd->pin));
 	bridge_read(bc, b_wid_tflush);
 }
 
@@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
 #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
 	defined (CONFIG_PPC_64K_PAGES)
 #define MAX_PHYSMEM_BITS 51
-#elif defined(CONFIG_SPARSEMEM)
+#elif defined(CONFIG_PPC64)
 #define MAX_PHYSMEM_BITS 46
 #endif
 
@@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
 	ld	r4,PACA_EXSLB+EX_DAR(r13)
 	std	r4,_DAR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+	/* HPT case, do SLB fault */
 	bl	do_slb_fault
 	cmpdi	r3,0
 	bne-	1f
 	b	fast_exception_return
 1:	/* Error case */
+MMU_FTR_SECTION_ELSE
+	/* Radix case, access is outside page table range */
+	li	r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 	std	r3,RESULT(r1)
 	bl	save_nvgprs
 	RECONCILE_IRQ_STATE(r10, r11)
@@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
 	EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
 	ld	r4,_NIP(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+	/* HPT case, do SLB fault */
 	bl	do_slb_fault
 	cmpdi	r3,0
 	bne-	1f
 	b	fast_exception_return
 1:	/* Error case */
+MMU_FTR_SECTION_ELSE
+	/* Radix case, access is outside page table range */
+	li	r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 	std	r3,RESULT(r1)
 	bl	save_nvgprs
 	RECONCILE_IRQ_STATE(r10, r11)
@@ -851,10 +851,6 @@ __secondary_start:
 	tophys(r4,r2)
 	addi	r4,r4,THREAD	/* phys address of our thread_struct */
 	mtspr	SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-	li	r3,0
-	stw	r3, RTAS_SP(r4)	/* 0 => not in RTAS */
-#endif
 	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
 	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
 	mtspr	SPRN_SPRG_PGDIR, r4
@@ -941,10 +937,6 @@ start_here:
 	tophys(r4,r2)
 	addi	r4,r4,THREAD	/* init task's THREAD */
 	mtspr	SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-	li	r3,0
-	stw	r3, RTAS_SP(r4)	/* 0 => not in RTAS */
-#endif
 	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
 	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
 	mtspr	SPRN_SPRG_PGDIR, r4
@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	 * can be used, r7 contains NSEC_PER_SEC.
 	 */
 
-	lwz	r5,WTOM_CLOCK_SEC(r9)
+	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
 	lwz	r6,WTOM_CLOCK_NSEC(r9)
 
 	/* We now have our offset in r5,r6. We create a fake dependency
arch/riscv/configs/rv32_defconfig (new file, 84 lines)
@@ -0,0 +1,84 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_ARCH_RV32I=y
+CONFIG_SMP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_RCU_TRACE is not set
@@ -121,6 +121,14 @@ void __init setup_bootmem(void)
 			 */
 			memblock_reserve(reg->base, vmlinux_end - reg->base);
 			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+
+			/*
+			 * Remove memblock from the end of usable area to the
+			 * end of region
+			 */
+			if (reg->base + mem_size < end)
+				memblock_remove(reg->base + mem_size,
+						end - reg->base - mem_size);
 		}
 	}
 	BUG_ON(mem_size == 0);
@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
 	p->npages = 0;
 }
 
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+	return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
 /* Interrupts must be disabled. */
 static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 	prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
 	while (npages != 0) {
-		if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) {
+		if (!iommu_use_atu(pbm->iommu, mask)) {
 			num = pci_sun4v_iommu_map(devhandle,
 						  HV_PCI_TSBID(0, entry),
 						  npages,
@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	unsigned long flags, order, first_page, npages, n;
 	unsigned long prot = 0;
 	struct iommu *iommu;
-	struct atu *atu;
 	struct iommu_map_table *tbl;
 	struct page *page;
 	void *ret;
@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
 	iommu = dev->archdata.iommu;
-	atu = iommu->atu;
-
 	mask = dev->coherent_dma_mask;
-	if (mask <= DMA_BIT_MASK(32) || !atu)
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
-		tbl = &atu->tbl;
+		tbl = &iommu->atu->tbl;
 
 	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
-	if (dvma <= DMA_BIT_MASK(32)) {
+	if (!iommu_use_atu(iommu, dvma)) {
 		tbl = &iommu->tbl;
 		iotsb_num = 0; /* we don't care for legacy iommu */
 	} else {
@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages >>= IO_PAGE_SHIFT;
 
 	mask = *dev->dma_mask;
-	if (mask <= DMA_BIT_MASK(32))
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
 		tbl = &atu->tbl;
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 
 	mask = *dev->dma_mask;
-	if (mask <= DMA_BIT_MASK(32))
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
 		tbl = &atu->tbl;
@@ -3,10 +3,14 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <asm/apicdef.h>
+#include <asm/nmi.h>
 
 #include "../perf_event.h"
 
+static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
 	}
 }
 
+/*
+ * When a PMC counter overflows, an NMI is used to process the event and
+ * reset the counter. NMI latency can result in the counter being updated
+ * before the NMI can run, which can result in what appear to be spurious
+ * NMIs. This function is intended to wait for the NMI to run and reset
+ * the counter to avoid possible unhandled NMI messages.
+ */
+#define OVERFLOW_WAIT_COUNT	50
+
+static void amd_pmu_wait_on_overflow(int idx)
+{
+	unsigned int i;
+	u64 counter;
+
+	/*
+	 * Wait for the counter to be reset if it has overflowed. This loop
+	 * should exit very, very quickly, but just in case, don't wait
+	 * forever...
+	 */
+	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
+		rdmsrl(x86_pmu_event_addr(idx), counter);
+		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
+			break;
+
+		/* Might be in IRQ context, so can't sleep */
+		udelay(1);
+	}
+}
+
+static void amd_pmu_disable_all(void)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int idx;
+
+	x86_pmu_disable_all();
+
+	/*
+	 * This shouldn't be called from NMI context, but add a safeguard here
+	 * to return, since if we're in NMI context we can't wait for an NMI
+	 * to reset an overflowed counter value.
+	 */
+	if (in_nmi())
+		return;
+
+	/*
+	 * Check each counter for overflow and wait for it to be reset by the
+	 * NMI if it has overflowed. This relies on the fact that all active
+	 * counters are always enabled when this function is called and
+	 * ARCH_PERFMON_EVENTSEL_INT is always set.
+	 */
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		amd_pmu_wait_on_overflow(idx);
+	}
+}
+
+static void amd_pmu_disable_event(struct perf_event *event)
+{
+	x86_pmu_disable_event(event);
+
+	/*
+	 * This can be called from NMI context (via x86_pmu_stop). The counter
+	 * may have overflowed, but either way, we'll never see it get reset
+	 * by the NMI if we're already in the NMI. And the NMI latency support
+	 * below will take care of any pending NMI that might have been
+	 * generated by the overflow.
+	 */
+	if (in_nmi())
+		return;
+
+	amd_pmu_wait_on_overflow(event->hw.idx);
+}
+
+/*
+ * Because of NMI latency, if multiple PMC counters are active or other sources
+ * of NMIs are received, the perf NMI handler can handle one or more overflowed
+ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
+ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
+ * back-to-back NMI support won't be active. This PMC handler needs to take into
+ * account that this can occur, otherwise this could result in unknown NMI
+ * messages being issued. Examples of this are PMC overflow while in the NMI
+ * handler when multiple PMCs are active or PMC overflow while handling some
+ * other source of an NMI.
+ *
+ * Attempt to mitigate this by using the number of active PMCs to determine
+ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
+ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
+ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
+ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ */
+static int amd_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	int active, handled;
+
+	/*
+	 * Obtain the active count before calling x86_pmu_handle_irq() since
+	 * it is possible that x86_pmu_handle_irq() may make a counter
+	 * inactive (through x86_pmu_stop).
+	 */
+	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+
+	/* Process any counter overflows */
+	handled = x86_pmu_handle_irq(regs);
+
+	/*
+	 * If a counter was handled, record the number of possible remaining
+	 * NMIs that can occur.
+	 */
+	if (handled) {
+		this_cpu_write(perf_nmi_counter,
+			       min_t(unsigned int, 2, active));
+
+		return handled;
+	}
+
+	if (!this_cpu_read(perf_nmi_counter))
+		return NMI_DONE;
+
+	this_cpu_dec(perf_nmi_counter);
+
+	return NMI_HANDLED;
+}
+
 static struct event_constraint *
 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event)
@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
 
 static __initconst const struct x86_pmu amd_pmu = {
 	.name			= "AMD",
-	.handle_irq		= x86_pmu_handle_irq,
-	.disable_all		= x86_pmu_disable_all,
+	.handle_irq		= amd_pmu_handle_irq,
+	.disable_all		= amd_pmu_disable_all,
 	.enable_all		= x86_pmu_enable_all,
 	.enable			= x86_pmu_enable_event,
-	.disable		= x86_pmu_disable_event,
+	.disable		= amd_pmu_disable_event,
 	.hw_config		= amd_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_K7_EVNTSEL0,
@@ -732,7 +862,7 @@ void amd_pmu_enable_virt(void)
 	cpuc->perf_ctr_virt_mask = 0;
 
 	/* Reload all events */
-	x86_pmu_disable_all();
+	amd_pmu_disable_all();
 	x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@@ -750,7 +880,7 @@ void amd_pmu_disable_virt(void)
 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
 	/* Reload all events */
-	x86_pmu_disable_all();
+	amd_pmu_disable_all();
 	x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
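A distilled sketch of the NMI accounting introduced in amd_pmu_handle_irq() above (standalone C with the per-CPU machinery and NMI return codes simplified to plain values; the threshold of 2 mirrors the patch):

	/* Sketch: absorb NMIs that may still be in flight after a handled
	 * PMC overflow.  'active' is the live counter count at IRQ time. */
	static unsigned int perf_nmi_counter;	/* per-CPU in the real code */

	static int sketch_handle_nmi(int handled, unsigned int active)
	{
		if (handled) {
			/* At most min(active, 2) late NMIs can still arrive. */
			perf_nmi_counter = active < 2 ? active : 2;
			return handled;		/* NMI_HANDLED upstream */
		}
		if (!perf_nmi_counter)
			return 0;		/* NMI_DONE: not ours */
		perf_nmi_counter--;
		return 1;	/* swallow one expected spurious NMI */
	}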
@@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+	if (test_bit(hwc->idx, cpuc->active_mask)) {
 		x86_pmu.disable(event);
+		__clear_bit(hwc->idx, cpuc->active_mask);
 		cpuc->events[hwc->idx] = NULL;
 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 		hwc->state |= PERF_HES_STOPPED;
@@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		if (!test_bit(idx, cpuc->active_mask)) {
-			/*
-			 * Though we deactivated the counter some cpus
-			 * might still deliver spurious interrupts still
-			 * in flight. Catch them:
-			 */
-			if (__test_and_clear_bit(idx, cpuc->running))
-				handled++;
+		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		}
 
 		event = cpuc->events[idx];
 
@@ -3185,7 +3185,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		return ret;
 
 	if (event->attr.precise_ip) {
-		if (!event->attr.freq) {
+		if (!(event->attr.freq || event->attr.wakeup_events)) {
 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 			if (!(event->attr.sample_type &
 			      ~intel_pmu_large_pebs_flags(event)))
@@ -3575,6 +3575,12 @@ static void intel_pmu_cpu_starting(int cpu)
 
 	cpuc->lbr_sel = NULL;
 
+	if (x86_pmu.flags & PMU_FL_TFA) {
+		WARN_ON_ONCE(cpuc->tfa_shadow);
+		cpuc->tfa_shadow = ~0ULL;
+		intel_set_tfa(cpuc, false);
+	}
+
 	if (x86_pmu.version > 1)
 		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
@@ -36,16 +36,17 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
+#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
 
-#define ADDR	BITOP_ADDR(addr)
+#define ADDR	RLONG_ADDR(addr)
 
 /*
  * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
 #define IS_IMMEDIATE(nr)	(__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)	(1 << ((nr) & 7))
 
 /**
@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
 			: "memory");
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
-			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
 
@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
 */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
 			: "iq" ((u8)~CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
+			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
 
@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
 	bool negative;
 	asm volatile(LOCK_PREFIX "andb %2,%1"
 		CC_SET(s)
-		: CC_OUT(s) (negative), ADDR
+		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
 		: "ir" ((char) ~(1 << nr)) : "memory");
 	return negative;
 }
@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
 */
 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-	barrier();
 	__clear_bit(nr, addr);
 }
 
@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
 */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 			: "iq" ((u8)CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
+			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
 
@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 
 	asm(__ASM_SIZE(bts) " %2,%1"
 	    CC_SET(c)
-	    : CC_OUT(c) (oldbit), ADDR
-	    : "Ir" (nr));
+	    : CC_OUT(c) (oldbit)
+	    : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 
 	asm volatile(__ASM_SIZE(btr) " %2,%1"
 		     CC_SET(c)
-		     : CC_OUT(c) (oldbit), ADDR
-		     : "Ir" (nr));
+		     : CC_OUT(c) (oldbit)
+		     : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 
 	asm volatile(__ASM_SIZE(btc) " %2,%1"
 		     CC_SET(c)
-		     : CC_OUT(c) (oldbit), ADDR
-		     : "Ir" (nr) : "memory");
+		     : CC_OUT(c) (oldbit)
+		     : ADDR, "Ir" (nr) : "memory");
 
 	return oldbit;
 }
@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 	asm volatile(__ASM_SIZE(bt) " %2,%1"
 		     CC_SET(c)
 		     : CC_OUT(c) (oldbit)
-		     : "m" (*(unsigned long *)addr), "Ir" (nr));
+		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
 
 	return oldbit;
 }
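For context on the constraint change above: the bitop address moves from a read-write output operand ("+m", the old BITOP_ADDR) to a plain input ("m", RLONG_ADDR), with an explicit "memory" clobber telling the compiler the asm writes memory. A standalone sketch of the resulting pattern (illustrative only, not part of the patch; btsq assumes a 64-bit build):

	/* Sketch: non-atomic set_bit using an "m" input + "memory" clobber. */
	static inline void sketch_set_bit(long nr, volatile unsigned long *addr)
	{
		asm volatile("btsq %1,%0"
			     : : "m" (*(volatile long *)addr), "Ir" (nr)
			     : "memory");
	}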
@@ -226,7 +226,9 @@ struct x86_emulate_ops {
 
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+			     const char *smstate);
+	void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
 
 };
 
@@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -844,9 +844,9 @@ enum kvm_irqchip_mode {
 };
 
 struct kvm_arch {
-	unsigned int n_used_mmu_pages;
-	unsigned int n_requested_mmu_pages;
-	unsigned int n_max_mmu_pages;
+	unsigned long n_used_mmu_pages;
+	unsigned long n_requested_mmu_pages;
+	unsigned long n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
@@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
 
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1256,8 +1256,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val) \
 	*(type *)((buf) + (offset) - 0x7e00) = val
 
+#define GET_SMSTATE(type, buf, offset) \
+	(*(type *)((buf) + (offset) - 0x7e00))
+
 #endif /* _ASM_X86_KVM_HOST_H */
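A small worked example of the buffer-relative addressing that put_smstate() and the new GET_SMSTATE() share (the offset and value below are made up for illustration):

	/* buf spans the 0x7e00..0x7fff window of the SMM state-save area,
	 * so architectural offset 0x7ffc maps to buf[0x7ffc - 0x7e00]. */
	char buf[512];
	put_smstate(u32, buf, 0x7ffc, 0x80000033);	/* store a CR0 image */
	u32 cr0 = GET_SMSTATE(u32, buf, 0x7ffc);	/* reads back 0x80000033 */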
@@ -146,6 +146,7 @@
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
+#define VMX_ABORT_VMCS_CORRUPTED 3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
 
 #endif /* _UAPIVMX_H */
@@ -2039,14 +2039,14 @@ out:
 enum rdt_param {
 	Opt_cdp,
 	Opt_cdpl2,
-	Opt_mba_mpbs,
+	Opt_mba_mbps,
 	nr__rdt_params
 };
 
 static const struct fs_parameter_spec rdt_param_specs[] = {
 	fsparam_flag("cdp", Opt_cdp),
 	fsparam_flag("cdpl2", Opt_cdpl2),
-	fsparam_flag("mba_mpbs", Opt_mba_mpbs),
+	fsparam_flag("mba_MBps", Opt_mba_mbps),
 	{}
 };
 
@@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	case Opt_cdpl2:
 		ctx->enable_cdpl2 = true;
 		return 0;
-	case Opt_mba_mpbs:
+	case Opt_mba_mbps:
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 			return -EINVAL;
 		ctx->enable_mba_mbps = true;
@@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
|
|||||||
|
|
||||||
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
|
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
|
||||||
{
|
{
|
||||||
|
#ifdef CONFIG_X86_64
|
||||||
u32 eax, ebx, ecx, edx;
|
u32 eax, ebx, ecx, edx;
|
||||||
|
|
||||||
eax = 0x80000001;
|
eax = 0x80000001;
|
||||||
ecx = 0;
|
ecx = 0;
|
||||||
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
|
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
|
||||||
return edx & bit(X86_FEATURE_LM);
|
return edx & bit(X86_FEATURE_LM);
|
||||||
|
#else
|
||||||
|
return false;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
#define GET_SMSTATE(type, smbase, offset) \
|
|
||||||
({ \
|
|
||||||
type __val; \
|
|
||||||
int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
|
|
||||||
sizeof(__val)); \
|
|
||||||
if (r != X86EMUL_CONTINUE) \
|
|
||||||
return X86EMUL_UNHANDLEABLE; \
|
|
||||||
__val; \
|
|
||||||
})
|
|
||||||
|
|
||||||
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
|
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
|
||||||
{
|
{
|
||||||
desc->g = (flags >> 23) & 1;
|
desc->g = (flags >> 23) & 1;
|
||||||
@@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
 	desc->type = (flags >> 8) & 15;
 }

-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+			   int n)
 {
 	struct desc_struct desc;
 	int offset;
 	u16 selector;

-	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
+	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);

 	if (n < 3)
 		offset = 0x7f84 + n * 12;
 	else
 		offset = 0x7f2c + (n - 3) * 12;

-	set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
-	set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
+	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
+	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
+	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
 	return X86EMUL_CONTINUE;
 }

-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+#ifdef CONFIG_X86_64
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
+			   int n)
 {
 	struct desc_struct desc;
 	int offset;
@@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)

 	offset = 0x7e00 + n * 16;

-	selector = GET_SMSTATE(u16, smbase, offset);
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
-	set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
-	set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
-	base3 = GET_SMSTATE(u32, smbase, offset + 12);
+	selector = GET_SMSTATE(u16, smstate, offset);
+	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
+	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
+	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
+	base3 = GET_SMSTATE(u32, smstate, offset + 12);

 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
 	return X86EMUL_CONTINUE;
 }
+#endif

 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 				    u64 cr0, u64 cr3, u64 cr4)
@@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 }

-static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
+			     const char *smstate)
 {
 	struct desc_struct desc;
 	struct desc_ptr dt;
@@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	u32 val, cr0, cr3, cr4;
 	int i;

-	cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
-	cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
-	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
-	ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
+	cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
+	cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
+	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
+	ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);

 	for (i = 0; i < 8; i++)
-		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
+		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

-	val = GET_SMSTATE(u32, smbase, 0x7fcc);
+	val = GET_SMSTATE(u32, smstate, 0x7fcc);
 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-	val = GET_SMSTATE(u32, smbase, 0x7fc8);
+	val = GET_SMSTATE(u32, smstate, 0x7fc8);
 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

-	selector = GET_SMSTATE(u32, smbase, 0x7fc4);
-	set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
-	set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
+	selector = GET_SMSTATE(u32, smstate, 0x7fc4);
+	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
+	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
+	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

-	selector = GET_SMSTATE(u32, smbase, 0x7fc0);
-	set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
-	set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
+	selector = GET_SMSTATE(u32, smstate, 0x7fc0);
+	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
+	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
+	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

-	dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
-	dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
+	dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
+	dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
 	ctxt->ops->set_gdt(ctxt, &dt);

-	dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
-	dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
+	dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
+	dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
 	ctxt->ops->set_idt(ctxt, &dt);

 	for (i = 0; i < 6; i++) {
-		int r = rsm_load_seg_32(ctxt, smbase, i);
+		int r = rsm_load_seg_32(ctxt, smstate, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}

-	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
+	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);

-	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
+	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));

 	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }

-static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
+#ifdef CONFIG_X86_64
+static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+			     const char *smstate)
 {
 	struct desc_struct desc;
 	struct desc_ptr dt;
@@ -2509,43 +2510,43 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	int i, r;

 	for (i = 0; i < 16; i++)
-		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
+		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);

-	ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
-	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
+	ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
+	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;

-	val = GET_SMSTATE(u32, smbase, 0x7f68);
+	val = GET_SMSTATE(u32, smstate, 0x7f68);
 	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-	val = GET_SMSTATE(u32, smbase, 0x7f60);
+	val = GET_SMSTATE(u32, smstate, 0x7f60);
 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

-	cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
-	cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
-	cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
-	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
-	val = GET_SMSTATE(u64, smbase, 0x7ed0);
+	cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
+	cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
+	cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
+	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
+	val = GET_SMSTATE(u64, smstate, 0x7ed0);
 	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

-	selector = GET_SMSTATE(u32, smbase, 0x7e90);
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
-	set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
-	set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
-	base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
+	selector = GET_SMSTATE(u32, smstate, 0x7e90);
+	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
+	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
+	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
+	base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

-	dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
-	dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
+	dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
+	dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
 	ctxt->ops->set_idt(ctxt, &dt);

-	selector = GET_SMSTATE(u32, smbase, 0x7e70);
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
-	set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
-	set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
-	base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
+	selector = GET_SMSTATE(u32, smstate, 0x7e70);
+	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
+	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
+	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
+	base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

-	dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
-	dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
+	dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
+	dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);

 	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
@@ -2553,37 +2554,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 		return r;

 	for (i = 0; i < 6; i++) {
-		r = rsm_load_seg_64(ctxt, smbase, i);
+		r = rsm_load_seg_64(ctxt, smstate, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}

 	return X86EMUL_CONTINUE;
 }
+#endif

 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
 	unsigned long cr0, cr4, efer;
+	char buf[512];
 	u64 smbase;
 	int ret;

 	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
 		return emulate_ud(ctxt);

+	smbase = ctxt->ops->get_smbase(ctxt);
+
+	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
+	if (ret != X86EMUL_CONTINUE)
+		return X86EMUL_UNHANDLEABLE;
+
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+		ctxt->ops->set_nmi_mask(ctxt, false);
+
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
 	 * supports long mode.
 	 */
-	cr4 = ctxt->ops->get_cr(ctxt, 4);
 	if (emulator_has_longmode(ctxt)) {
 		struct desc_struct cs_desc;

 		/* Zero CR4.PCIDE before CR0.PG. */
-		if (cr4 & X86_CR4_PCIDE) {
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PCIDE)
 			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-			cr4 &= ~X86_CR4_PCIDE;
-		}

 		/* A 32-bit code segment is required to clear EFER.LMA. */
 		memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2597,39 +2610,39 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (cr0 & X86_CR0_PE)
 		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

-	/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
-	if (cr4 & X86_CR4_PAE)
-		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+	if (emulator_has_longmode(ctxt)) {
+		/* Clear CR4.PAE before clearing EFER.LME. */
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		if (cr4 & X86_CR4_PAE)
+			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

 		/* And finally go back to 32-bit mode. */
 		efer = 0;
 		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+	}

-	smbase = ctxt->ops->get_smbase(ctxt);

 	/*
 	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
 	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
 	 * state-save area.
 	 */
-	if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+	if (ctxt->ops->pre_leave_smm(ctxt, buf))
 		return X86EMUL_UNHANDLEABLE;

+#ifdef CONFIG_X86_64
 	if (emulator_has_longmode(ctxt))
-		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
+		ret = rsm_load_state_64(ctxt, buf);
 	else
-		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
+#endif
+		ret = rsm_load_state_32(ctxt, buf);

 	if (ret != X86EMUL_CONTINUE) {
 		/* FIXME: should triple fault */
 		return X86EMUL_UNHANDLEABLE;
 	}

-	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-		ctxt->ops->set_nmi_mask(ctxt, false);
+	ctxt->ops->post_leave_smm(ctxt);

-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
 	return X86EMUL_CONTINUE;
 }

@@ -138,6 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
 		if (offset <= max_apic_id) {
 			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

+			offset = array_index_nospec(offset, map->max_apic_id + 1);
 			*cluster = &map->phys_map[offset];
 			*mask = dest_id & (0xffff >> (16 - cluster_size));
 		} else {
@@ -901,7 +902,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
 		if (irq->dest_id > map->max_apic_id) {
 			*bitmap = 0;
 		} else {
-			*dst = &map->phys_map[irq->dest_id];
+			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
+			*dst = &map->phys_map[dest_id];
 			*bitmap = 1;
 		}
 		return true;
@@ -2007,7 +2007,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
 	kvm->arch.n_used_mmu_pages += nr;
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2238,7 +2238,7 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
 					struct list_head *invalid_list,
 					bool remote_flush)
 {
-	if (!remote_flush && !list_empty(invalid_list))
+	if (!remote_flush && list_empty(invalid_list))
 		return false;

 	if (!list_empty(invalid_list))
@@ -2763,7 +2763,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
 * Changing the number of mmu pages allocated to the vm
 * Note: if goal_nr_mmu_pages is too small, you will get dead lock
 */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);

@@ -6031,10 +6031,10 @@ out:
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
-	unsigned int nr_mmu_pages;
-	unsigned int nr_pages = 0;
+	unsigned long nr_mmu_pages;
+	unsigned long nr_pages = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int i;
@@ -6047,8 +6047,7 @@ unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 	}

 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-	nr_mmu_pages = max(nr_mmu_pages,
-			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);

 	return nr_mmu_pages;
 }
@@ -64,7 +64,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 			  u64 fault_address, char *insn, int insn_len);

-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
 		return kvm->arch.n_max_mmu_pages -
@@ -281,9 +281,13 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
 	bool fast_mode = idx & (1u << 31);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
 	u64 ctr_val;

+	if (!pmu->version)
+		return 1;
+
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

@@ -262,6 +262,7 @@ struct amd_svm_iommu_ir {
 };

 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)

 #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
@@ -2692,6 +2693,7 @@ static int npf_interception(struct vcpu_svm *svm)
 static int db_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
+	struct kvm_vcpu *vcpu = &svm->vcpu;

 	if (!(svm->vcpu.guest_debug &
 	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
@@ -2702,6 +2704,8 @@ static int db_interception(struct vcpu_svm *svm)

 	if (svm->nmi_singlestep) {
 		disable_nmi_singlestep(svm);
+		/* Make sure we check for pending NMIs upon entry */
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
 	}

 	if (svm->vcpu.guest_debug &
@@ -4517,14 +4521,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+		int i;
+		struct kvm_vcpu *vcpu;
+		struct kvm *kvm = svm->vcpu.kvm;
 		struct kvm_lapic *apic = svm->vcpu.arch.apic;

 		/*
-		 * Update ICR high and low, then emulate sending IPI,
-		 * which is handled when writing APIC_ICR.
+		 * At this point, we expect that the AVIC HW has already
+		 * set the appropriate IRR bits on the valid target
+		 * vcpus. So, we just need to kick the appropriate vcpu.
 		 */
-		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
-		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			bool m = kvm_apic_match_dest(vcpu, apic,
+						     icrl & KVM_APIC_SHORT_MASK,
+						     GET_APIC_DEST_FIELD(icrh),
+						     icrl & KVM_APIC_DEST_MASK);
+
+			if (m && !avic_vcpu_is_running(vcpu))
+				kvm_vcpu_wake_up(vcpu);
+		}
 		break;
 	}
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -4596,7 +4611,7 @@ static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
 	u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);

 	if (entry)
-		WRITE_ONCE(*entry, (u32) ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK);
+		clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
 }

 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
@@ -5621,6 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;

 	clgi();
+	kvm_load_guest_xcr0(vcpu);

 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5766,6 +5782,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(&svm->vcpu);

+	kvm_put_guest_xcr0(vcpu);
 	stgi();

 	/* Any pending NMI will happen here */
@@ -6215,32 +6232,24 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }

-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb *nested_vmcb;
 	struct page *page;
-	struct {
-		u64 guest;
-		u64 vmcb;
-	} svm_state_save;
-	int ret;
+	u64 guest;
+	u64 vmcb;

-	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
-				  sizeof(svm_state_save));
-	if (ret)
-		return ret;
+	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);

-	if (svm_state_save.guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
-		nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-		vcpu->arch.hflags |= HF_SMM_MASK;
+	if (guest) {
+		nested_vmcb = nested_svm_map(svm, vmcb, &page);
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }

 static int enable_smi_window(struct kvm_vcpu *vcpu)
@@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi,
 );

 TRACE_EVENT(kvm_apic_accept_irq,
-	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
 	    TP_ARGS(apicid, dm, tm, vec),

 	TP_STRUCT__entry(
 		__field( __u32, apicid )
 		__field( __u16, dm )
-		__field( __u8, tm )
+		__field( __u16, tm )
 		__field( __u8, vec )
 	),

@@ -2873,20 +2873,27 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		/*
 		 * If translation failed, VM entry will fail because
 		 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
-		 * Failing the vm entry is _not_ what the processor
-		 * does but it's basically the only possibility we
-		 * have. We could still enter the guest if CR8 load
-		 * exits are enabled, CR8 store exits are enabled, and
-		 * virtualize APIC access is disabled; in this case
-		 * the processor would never use the TPR shadow and we
-		 * could simply clear the bit from the execution
-		 * control. But such a configuration is useless, so
-		 * let's keep the code simple.
 		 */
 		if (!is_error_page(page)) {
 			vmx->nested.virtual_apic_page = page;
 			hpa = page_to_phys(vmx->nested.virtual_apic_page);
 			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+		} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
+			   nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
+			   !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+			/*
+			 * The processor will never use the TPR shadow, simply
+			 * clear the bit from the execution control. Such a
+			 * configuration is useless, but it happens in tests.
+			 * For any other configuration, failing the vm entry is
+			 * _not_ what the processor does but it's basically the
+			 * only possibility we have.
+			 */
+			vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+					CPU_BASED_TPR_SHADOW);
+		} else {
+			printk("bad virtual-APIC page address\n");
+			dump_vmcs();
 		}
 	}

@@ -3789,8 +3796,18 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 	vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));

 	nested_ept_uninit_mmu_context(vcpu);
-	vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+	/*
+	 * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+	 * points to shadow pages! Fortunately we only get here after a WARN_ON
+	 * if EPT is disabled, so a VMabort is perfectly fine.
+	 */
+	if (enable_ept) {
+		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+	} else {
+		nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+	}

 	/*
 	 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
@@ -5738,6 +5755,14 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 {
 	int i;

+	/*
+	 * Without EPT it is not possible to restore L1's CR3 and PDPTR on
+	 * VMfail, because they are not available in vmcs01. Just always
+	 * use hardware checks.
+	 */
+	if (!enable_ept)
+		nested_early_check = 1;
+
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
 	if (enable_shadow_vmcs) {
@@ -5603,7 +5603,7 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
 	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
 }

-static void dump_vmcs(void)
+void dump_vmcs(void)
 {
 	u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
 	u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
@@ -6410,6 +6410,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);

+	kvm_load_guest_xcr0(vcpu);
+
 	if (static_cpu_has(X86_FEATURE_PKU) &&
 	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
 	    vcpu->arch.pkru != vmx->host_pkru)
@@ -6506,6 +6508,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		__write_pkru(vmx->host_pkru);
 	}

+	kvm_put_guest_xcr0(vcpu);
+
 	vmx->nested.nested_run_pending = 0;
 	vmx->idt_vectoring_info = 0;

@@ -6852,6 +6856,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
 	}
 }

+static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *entry;
+	union cpuid10_eax eax;
+
+	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+	if (!entry)
+		return false;
+
+	eax.full = entry->eax;
+	return (eax.split.version_id > 0);
+}
+
+static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
+
+	if (pmu_enabled)
+		vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
+	else
+		vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
+}
+
 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6940,6 +6968,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	if (nested_vmx_allowed(vcpu)) {
 		nested_vmx_cr_fixed1_bits_update(vcpu);
 		nested_vmx_entry_exit_ctls_update(vcpu);
+		nested_vmx_procbased_ctls_update(vcpu);
 	}

 	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7369,7 +7398,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	return 0;
 }

-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
@@ -7380,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 	}

 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;

@@ -517,4 +517,6 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
 	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
 }

+void dump_vmcs(void);
+
 #endif /* __KVM_X86_VMX_H */
@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);

-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
 			!vcpu->guest_xcr0_loaded) {
@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 		vcpu->guest_xcr0_loaded = 1;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);

-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_xcr0_loaded) {
 		if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 		vcpu->guest_xcr0_loaded = 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);

 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -3093,7 +3095,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_NESTED_STATE:
 		r = kvm_x86_ops->get_nested_state ?
-			kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+			kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
 		break;
 	default:
 		break;
@@ -3528,7 +3530,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }

-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);

 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
@@ -3588,12 +3590,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;

 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-		u32 hflags = vcpu->arch.hflags;
-		if (events->smi.smm)
-			hflags |= HF_SMM_MASK;
-		else
-			hflags &= ~HF_SMM_MASK;
-		kvm_set_hflags(vcpu, hflags);
+		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+			if (events->smi.smm)
+				vcpu->arch.hflags |= HF_SMM_MASK;
+			else
+				vcpu->arch.hflags &= ~HF_SMM_MASK;
+			kvm_smm_changed(vcpu);
+		}

 		vcpu->arch.smi_pending = events->smi.pending;

@@ -4270,7 +4273,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }

 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-					 u32 kvm_nr_mmu_pages)
+					 unsigned long kvm_nr_mmu_pages)
 {
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
@@ -4284,7 +4287,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	return 0;
 }

-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
 	return kvm->arch.n_max_mmu_pages;
 }
@@ -5958,12 +5961,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)

 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+	emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
 }

-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+				  const char *smstate)
 {
-	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
+}
+
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+{
+	kvm_smm_changed(emul_to_vcpu(ctxt));
 }

 static const struct x86_emulate_ops emulate_ops = {
@@ -6006,6 +6015,7 @@ static const struct x86_emulate_ops emulate_ops = {
 	.get_hflags = emulator_get_hflags,
 	.set_hflags = emulator_set_hflags,
 	.pre_leave_smm = emulator_pre_leave_smm,
+	.post_leave_smm = emulator_post_leave_smm,
 };

 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6247,16 +6257,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }

-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
-	unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
-	vcpu->arch.hflags = emul_flags;
-
-	if (changed & HF_SMM_MASK)
-		kvm_smm_changed(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 				unsigned long *db)
 {
@@ -7441,9 +7441,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }

+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
 	struct desc_ptr dt;
 	struct kvm_segment seg;
 	unsigned long val;
@@ -7493,10 +7493,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)

 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
-#else
-	WARN_ON_ONCE(1);
-#endif
 }
+#endif

 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7507,9 +7505,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)

 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
 	memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		enter_smm_save_state_64(vcpu, buf);
 	else
+#endif
 		enter_smm_save_state_32(vcpu, buf);

 	/*
@@ -7567,8 +7567,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);

+#ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
 		kvm_x86_ops->set_efer(vcpu, 0);
+#endif

 	kvm_update_cpuid(vcpu);
 	kvm_mmu_reset_context(vcpu);
@@ -7865,8 +7867,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto cancel_injection;
 	}

-	kvm_load_guest_xcr0(vcpu);
-
 	if (req_immediate_exit) {
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7919,8 +7919,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();

-	kvm_put_guest_xcr0(vcpu);
-
 	kvm_before_interrupt(vcpu);
 	kvm_x86_ops->handle_external_intr(vcpu);
 	kvm_after_interrupt(vcpu);
@@ -347,4 +347,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
 	__this_cpu_write(current_vcpu, NULL);
 }

+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
 #endif
@@ -2822,7 +2822,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
 	bfq_remove_request(q, rq);
 }

-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
 	/*
 	 * If this bfqq is shared between multiple processes, check
@@ -2855,9 +2855,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	/*
 	 * All in-service entities must have been properly deactivated
 	 * or requeued before executing the next function, which
-	 * resets all in-service entites as no more in service.
+	 * resets all in-service entities as no more in service. This
+	 * may cause bfqq to be freed. If this happens, the next
+	 * function returns true.
 	 */
-	__bfq_bfqd_reset_in_service(bfqd);
+	return __bfq_bfqd_reset_in_service(bfqd);
 }

 /**
@@ -3262,7 +3264,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
 	bool slow;
 	unsigned long delta = 0;
 	struct bfq_entity *entity = &bfqq->entity;
-	int ref;

 	/*
 	 * Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3347,10 +3348,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
 	 * reason.
 	 */
 	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
-	ref = bfqq->ref;
-	__bfq_bfqq_expire(bfqd, bfqq);
-
-	if (ref == 1) /* bfqq is gone, no more actions on it */
+	if (__bfq_bfqq_expire(bfqd, bfqq))
+		/* bfqq is gone, no more actions on it */
 		return;

 	bfqq->injected_service = 0;
@@ -995,7 +995,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
 			     bool ins_into_idle_tree);
 bool next_queue_may_preempt(struct bfq_data *bfqd);
 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 			 bool ins_into_idle_tree, bool expiration);
 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
@@ -1605,7 +1605,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 	return bfqq;
 }

-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 {
 	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
 	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1629,8 +1630,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 	 * service tree either, then release the service reference to
 	 * the queue it represents (taken with bfq_get_entity).
 	 */
-	if (!in_serv_entity->on_st)
+	if (!in_serv_entity->on_st) {
+		/*
+		 * If no process is referencing in_serv_bfqq any
+		 * longer, then the service reference may be the only
+		 * reference to the queue. If this is the case, then
+		 * bfqq gets freed here.
+		 */
+		int ref = in_serv_bfqq->ref;
 		bfq_put_queue(in_serv_bfqq);
+		if (ref == 1)
+			return true;
+	}
+
+	return false;
 }

 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -1298,8 +1298,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 			}
 		}

-		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+			if (!map_data)
+				__free_page(page);
 			break;
+		}

 		len -= bytes;
 		offset = 0;
@@ -654,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);

+void blk_mq_complete_request_sync(struct request *rq)
+{
+	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+	rq->q->mq_ops->complete(rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
+
 int blk_mq_request_started(struct request *rq)
 {
 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
 		}
 	}

+	if (obj_desc->common.type == ACPI_TYPE_REGION) {
+		acpi_ut_remove_address_range(obj_desc->region.space_id, node);
+	}
+
 	/* Clear the Node entry in all cases */

 	node->object = NULL;
@@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		goto out;
 	}

+	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+			cmd_name, out_obj->buffer.length);
+	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+			out_obj->buffer.pointer,
+			min_t(u32, 128, out_obj->buffer.length), true);
+
 	if (call_pkg) {
 		call_pkg->nd_fw_size = out_obj->buffer.length;
 		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		return 0;
 	}

-	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
-			cmd_name, out_obj->buffer.length);
-	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
-			out_obj->buffer.pointer,
-			min_t(u32, 128, out_obj->buffer.length), true);
-
 	for (i = 0, offset = 0; i < desc->out_num; i++) {
 		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
 				(u32 *) out_obj->buffer.pointer,
@@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
 	if (!test_bit(cmd, &nfit_mem->dsm_mask))
 		return -ENOTTY;

-	if (old_data)
-		memcpy(nd_cmd.cmd.old_pass, old_data->data,
-				sizeof(nd_cmd.cmd.old_pass));
+	memcpy(nd_cmd.cmd.old_pass, old_data->data,
+			sizeof(nd_cmd.cmd.old_pass));
 	memcpy(nd_cmd.cmd.new_pass, new_data->data,
 			sizeof(nd_cmd.cmd.new_pass));
 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,

 	/* flush all cache before we erase DIMM */
 	nvdimm_invalidate_cache();
-	if (nkey)
-		memcpy(nd_cmd.cmd.passphrase, nkey->data,
-				sizeof(nd_cmd.cmd.passphrase));
+	memcpy(nd_cmd.cmd.passphrase, nkey->data,
+			sizeof(nd_cmd.cmd.passphrase));
 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
 	if (rc < 0)
 		return rc;
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
 	if (err)
 		num_vqs = 1;

+	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
 		return -ENOMEM;
@@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
 		return 0;
 	}

+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
 			       0, "OOB Wake-on-BT", data);
 	if (ret) {
@@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
 	}

 	data->oob_wake_irq = irq;
-	disable_irq(irq);
 	bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
 	return 0;
 }
@@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
 		return;
 	}

-	memset(&p, 0, sizeof(p));
 	p.addr = base_addr;
 	p.space = space;
 	p.regspacing = offset;
@@ -214,6 +214,9 @@ struct ipmi_user {
 
 	/* Does this interface receive IPMI events? */
 	bool gets_events;
+
+	/* Free must run in process context for RCU cleanup. */
+	struct work_struct remove_work;
 };
 
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
 	return rv;
 }
 
+static void free_user_work(struct work_struct *work)
+{
+	struct ipmi_user *user = container_of(work, struct ipmi_user,
+					      remove_work);
+
+	cleanup_srcu_struct(&user->release_barrier);
+	kfree(user);
+}
+
 int ipmi_create_user(unsigned int if_num,
 		     const struct ipmi_user_hndl *handler,
 		     void *handler_data,
@@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int if_num,
 		goto out_kfree;
 
 found:
+	INIT_WORK(&new_user->remove_work, free_user_work);
+
 	rv = init_srcu_struct(&new_user->release_barrier);
 	if (rv)
 		goto out_kfree;
@@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-	cleanup_srcu_struct(&user->release_barrier);
-	kfree(user);
+
+	/* SRCU cleanup must happen in task context. */
+	schedule_work(&user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
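A kref release callback such as free_user() can run in atomic context, while cleanup_srcu_struct() may sleep; the ipmi hunks above therefore push the final teardown to a workqueue. A self-contained sketch of the same pattern, all names invented:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>

struct object {
	struct kref refcount;
	struct srcu_struct barrier;
	struct work_struct remove_work;	/* INIT_WORK() when the object is created */
};

static void object_free_work(struct work_struct *work)
{
	struct object *obj = container_of(work, struct object, remove_work);

	cleanup_srcu_struct(&obj->barrier);	/* may sleep: process context only */
	kfree(obj);
}

static void object_release(struct kref *ref)	/* may fire in atomic context */
{
	struct object *obj = container_of(ref, struct object, refcount);

	schedule_work(&obj->remove_work);	/* safe from any context */
}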
@@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void)
 	char *str;
 	char *si_type[SI_MAX_PARMS];
 
+	memset(si_type, 0, sizeof(si_type));
+
 	/* Parse out the si_type string into its components. */
 	str = si_type_str;
 	if (*str != '\0') {
@@ -20,8 +20,7 @@
 #define PROG_ID_MAX		7
 
 #define PROG_STATUS_MASK(id)	(1 << ((id) + 8))
-#define PROG_PRES_MASK		0x7
-#define PROG_PRES(layout, pckr)	((pckr >> layout->pres_shift) & PROG_PRES_MASK)
+#define PROG_PRES(layout, pckr)	((pckr >> layout->pres_shift) & layout->pres_mask)
 #define PROG_MAX_RM9200_CSS	3
 
 struct clk_programmable {
@@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
 						  unsigned long parent_rate)
 {
 	struct clk_programmable *prog = to_clk_programmable(hw);
+	const struct clk_programmable_layout *layout = prog->layout;
 	unsigned int pckr;
+	unsigned long rate;
 
 	regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
 
-	return parent_rate >> PROG_PRES(prog->layout, pckr);
+	if (layout->is_pres_direct)
+		rate = parent_rate / (PROG_PRES(layout, pckr) + 1);
+	else
+		rate = parent_rate >> PROG_PRES(layout, pckr);
+
+	return rate;
 }
 
 static int clk_programmable_determine_rate(struct clk_hw *hw,
 					   struct clk_rate_request *req)
 {
+	struct clk_programmable *prog = to_clk_programmable(hw);
+	const struct clk_programmable_layout *layout = prog->layout;
 	struct clk_hw *parent;
 	long best_rate = -EINVAL;
 	unsigned long parent_rate;
-	unsigned long tmp_rate;
+	unsigned long tmp_rate = 0;
 	int shift;
 	int i;
 
@@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw,
 			continue;
 
 		parent_rate = clk_hw_get_rate(parent);
-		for (shift = 0; shift < PROG_PRES_MASK; shift++) {
-			tmp_rate = parent_rate >> shift;
-			if (tmp_rate <= req->rate)
-				break;
+		if (layout->is_pres_direct) {
+			for (shift = 0; shift <= layout->pres_mask; shift++) {
+				tmp_rate = parent_rate / (shift + 1);
+				if (tmp_rate <= req->rate)
+					break;
+			}
+		} else {
+			for (shift = 0; shift < layout->pres_mask; shift++) {
+				tmp_rate = parent_rate >> shift;
+				if (tmp_rate <= req->rate)
+					break;
+			}
 		}
 
 		if (tmp_rate > req->rate)
@@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
 	if (!div)
 		return -EINVAL;
 
-	shift = fls(div) - 1;
+	if (layout->is_pres_direct) {
+		shift = div - 1;
 
-	if (div != (1 << shift))
-		return -EINVAL;
+		if (shift > layout->pres_mask)
+			return -EINVAL;
+	} else {
+		shift = fls(div) - 1;
 
-	if (shift >= PROG_PRES_MASK)
-		return -EINVAL;
+		if (div != (1 << shift))
+			return -EINVAL;
+
+		if (shift >= layout->pres_mask)
+			return -EINVAL;
+	}
 
 	regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
-			   PROG_PRES_MASK << layout->pres_shift,
+			   layout->pres_mask << layout->pres_shift,
 			   shift << layout->pres_shift);
 
 	return 0;
@@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap,
 }
 
 const struct clk_programmable_layout at91rm9200_programmable_layout = {
+	.pres_mask = 0x7,
 	.pres_shift = 2,
 	.css_mask = 0x3,
 	.have_slck_mck = 0,
+	.is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+	.pres_mask = 0x7,
 	.pres_shift = 2,
 	.css_mask = 0x3,
 	.have_slck_mck = 1,
+	.is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+	.pres_mask = 0x7,
 	.pres_shift = 4,
 	.css_mask = 0x7,
 	.have_slck_mck = 0,
+	.is_pres_direct = 0,
 };
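The new is_pres_direct flag selects between two PCKR prescaler encodings: the older layouts store a power-of-two shift, the sama5d2 layout stores the divisor minus one. A standalone sketch of the resulting rate math, with illustrative numbers only:

#include <stdio.h>

static unsigned long rate_shift(unsigned long parent, unsigned int field)
{
	return parent >> field;		/* divisor = 1 << field */
}

static unsigned long rate_direct(unsigned long parent, unsigned int field)
{
	return parent / (field + 1);	/* divisor = field + 1 */
}

int main(void)
{
	/* same register field value, very different divisors */
	printf("%lu %lu\n", rate_shift(132000000, 3), rate_direct(132000000, 3));
	return 0;	/* prints 16500000 33000000 */
}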
@@ -71,9 +71,11 @@ struct clk_pll_characteristics {
 };
 
 struct clk_programmable_layout {
+	u8 pres_mask;
 	u8 pres_shift;
 	u8 css_mask;
 	u8 have_slck_mck;
+	u8 is_pres_direct;
 };
 
 extern const struct clk_programmable_layout at91rm9200_programmable_layout;
@@ -125,6 +125,14 @@ static const struct {
 	  .pll = true },
 };
 
+static const struct clk_programmable_layout sama5d2_programmable_layout = {
+	.pres_mask = 0xff,
+	.pres_shift = 4,
+	.css_mask = 0x7,
+	.have_slck_mck = 0,
+	.is_pres_direct = 1,
+};
+
 static void __init sama5d2_pmc_setup(struct device_node *np)
 {
 	struct clk_range range = CLK_RANGE(0, 0);
@@ -249,7 +257,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
 
 		hw = at91_clk_register_programmable(regmap, name,
 						    parent_names, 6, i,
-						    &at91sam9x5_programmable_layout);
+						    &sama5d2_programmable_layout);
 		if (IS_ERR(hw))
 			goto err_free;
 	}
@@ -362,7 +362,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
 
 	switch (pll_clk->type) {
 	case PLL_1416X:
-		if (!pll->rate_table)
+		if (!pll_clk->rate_table)
 			init.ops = &clk_pll1416x_min_ops;
 		else
 			init.ops = &clk_pll1416x_ops;
@@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate(
 		return ERR_PTR(-ENOMEM);
 
 	init.name = name;
-	init.flags = CLK_SET_RATE_PARENT;
+	init.flags = flags | CLK_SET_RATE_PARENT;
 	init.parent_names = parent_name ? &parent_name : NULL;
 	init.num_parents = parent_name ? 1 : 0;
 	init.ops = ops;
-	init.flags = flags;
 
 	cg->regmap = regmap;
 	cg->set_ofs = set_ofs;
@@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate,
 			return true;
 	} else {
 		/* Round down */
-		if (now < rate && best < now)
+		if (now <= rate && best < now)
 			return true;
 	}
 
@@ -960,14 +960,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = {
 /* VPU Clock */
 
 static const char * const g12a_vpu_parent_names[] = {
-	"fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+	"fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
 	"mpll1", "vid_pll", "hifi_pll", "gp0_pll",
 };
 
 static struct clk_regmap g12a_vpu_0_sel = {
 	.data = &(struct clk_regmap_mux_data){
 		.offset = HHI_VPU_CLK_CNTL,
-		.mask = 0x3,
+		.mask = 0x7,
 		.shift = 9,
 	},
 	.hw.init = &(struct clk_init_data){
@@ -1011,7 +1011,7 @@ static struct clk_regmap g12a_vpu_0 = {
 static struct clk_regmap g12a_vpu_1_sel = {
 	.data = &(struct clk_regmap_mux_data){
 		.offset = HHI_VPU_CLK_CNTL,
-		.mask = 0x3,
+		.mask = 0x7,
 		.shift = 25,
 	},
 	.hw.init = &(struct clk_init_data){
@@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
 		.offset = HHI_VDEC_CLK_CNTL,
 		.shift = 0,
 		.width = 7,
+		.flags = CLK_DIVIDER_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "vdec_1_div",
@@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
 		.offset = HHI_VDEC2_CLK_CNTL,
 		.shift = 16,
 		.width = 7,
+		.flags = CLK_DIVIDER_ROUND_CLOSEST,
 	},
 	.hw.init = &(struct clk_init_data){
 		.name = "vdec_hevc_div",
@@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
 	div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
 			     meson_parm_read(clk->map, &pll_div->sel));
 	if (!div || !div->divider) {
-		pr_info("%s: Invalid config value for vid_pll_div\n", __func__);
-		return parent_rate;
+		pr_debug("%s: Invalid config value for vid_pll_div\n", __func__);
+		return 0;
 	}
 
 	return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider);
@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
 };
 
 static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
-					void __iomem *base,
+					const struct pmc_clk_data *pmc_data,
 					const char **parent_names,
 					int num_parents)
 {
@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
 	init.num_parents = num_parents;
 
 	pclk->hw.init = &init;
-	pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+	pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
 	spin_lock_init(&pclk->lock);
 
+	/*
+	 * On some systems, the pmc_plt_clocks already enabled by the
+	 * firmware are being marked as critical to avoid them being
+	 * gated by the clock framework.
+	 */
+	if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
+		init.flags |= CLK_IS_CRITICAL;
+
 	ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
 	if (ret) {
 		pclk = ERR_PTR(ret);
@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
 		return PTR_ERR(parent_names);
 
 	for (i = 0; i < PMC_CLK_NUM; i++) {
-		data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
+		data->clks[i] = plt_clk_register(pdev, i, pmc_data,
 						 parent_names, data->nparents);
 		if (IS_ERR(data->clks[i])) {
 			err = PTR_ERR(data->clks[i]);
@@ -3173,11 +3173,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 			break;
 
 		if (fence) {
-			r = dma_fence_wait_timeout(fence, false, tmo);
+			tmo = dma_fence_wait_timeout(fence, false, tmo);
 			dma_fence_put(fence);
 			fence = next;
-			if (r <= 0)
+			if (tmo == 0) {
+				r = -ETIMEDOUT;
 				break;
+			} else if (tmo < 0) {
+				r = tmo;
+				break;
+			}
 		} else {
 			fence = next;
 		}
@@ -3188,8 +3193,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 		tmo = dma_fence_wait_timeout(fence, false, tmo);
 		dma_fence_put(fence);
 
-	if (r <= 0 || tmo <= 0) {
-		DRM_ERROR("recover vram bo from shadow failed\n");
+	if (r < 0 || tmo <= 0) {
+		DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
 		return -EIO;
 	}
 
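The recover-vram fix depends on dma_fence_wait_timeout() returning the remaining jiffies on success, 0 on timeout, and a negative errno on failure; the old code folded timeout and error together. A hedged sketch of a wait loop honoring that convention (the helper itself is invented):

#include <linux/dma-fence.h>
#include <linux/errno.h>

/* wait for every fence in an array on one shared time budget */
static long wait_all_fences(struct dma_fence **fences, int n, long tmo)
{
	int i;

	for (i = 0; i < n; i++) {
		tmo = dma_fence_wait_timeout(fences[i], false, tmo);
		if (tmo == 0)
			return -ETIMEDOUT;	/* budget exhausted */
		if (tmo < 0)
			return tmo;		/* real error, propagate as-is */
	}
	return 0;	/* all signaled with budget to spare */
}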
@@ -35,6 +35,7 @@
 #include "amdgpu_trace.h"
 
 #define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)
 
 /*
  * IB
@@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		 * cost waiting for it coming back under RUNTIME only
 		 */
 		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+	} else if (adev->gmc.xgmi.hive_id) {
+		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
 	}
 
 	for (i = 0; i < adev->num_rings; ++i) {
@@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x9876, &carrizo_device_info },	/* Carrizo */
 	{ 0x9877, &carrizo_device_info },	/* Carrizo */
 	{ 0x15DD, &raven_device_info },		/* Raven */
+	{ 0x15D8, &raven_device_info },		/* Raven */
 #endif
 	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
 	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
@@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane,
 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
 
+	memset(&attributes, 0, sizeof(attributes));
 	attributes.address.high_part = upper_32_bits(address);
 	attributes.address.low_part  = lower_32_bits(address);
 	attributes.width             = plane->state->crtc_w;
@@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position(
 	REG_UPDATE(CURSOR_CONTROL,
 			CURSOR_ENABLE, cur_en);
 
-	//account for cases where we see negative offset relative to overlay plane
-	if (src_x_offset < 0 && src_y_offset < 0) {
-		REG_SET_2(CURSOR_POSITION, 0,
-			CURSOR_X_POSITION, 0,
-			CURSOR_Y_POSITION, 0);
-		x_hotspot -= src_x_offset;
-		y_hotspot -= src_y_offset;
-	} else if (src_x_offset < 0) {
-		REG_SET_2(CURSOR_POSITION, 0,
-			CURSOR_X_POSITION, 0,
-			CURSOR_Y_POSITION, pos->y);
-		x_hotspot -= src_x_offset;
-	} else if (src_y_offset < 0) {
-		REG_SET_2(CURSOR_POSITION, 0,
-			CURSOR_X_POSITION, pos->x,
-			CURSOR_Y_POSITION, 0);
-		y_hotspot -= src_y_offset;
-	} else {
-		REG_SET_2(CURSOR_POSITION, 0,
-			CURSOR_X_POSITION, pos->x,
-			CURSOR_Y_POSITION, pos->y);
-	}
+	REG_SET_2(CURSOR_POSITION, 0,
+			CURSOR_X_POSITION, pos->x,
+			CURSOR_Y_POSITION, pos->y);
 
 	REG_SET_2(CURSOR_HOT_SPOT, 0,
 			CURSOR_HOT_SPOT_X, x_hotspot,
@@ -1037,6 +1037,31 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
 
+/* Filter out invalid setups to avoid configuring SCDC and scrambling */
+static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
+{
+	struct drm_display_info *display = &hdmi->connector.display_info;
+
+	/* Completely disable SCDC support for older controllers */
+	if (hdmi->version < 0x200a)
+		return false;
+
+	/* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
+	if (!display->hdmi.scdc.supported ||
+	    !display->hdmi.scdc.scrambling.supported)
+		return false;
+
+	/*
+	 * Disable if display only support low TMDS rates and scrambling
+	 * for low rates is not supported either
+	 */
+	if (!display->hdmi.scdc.scrambling.low_rates &&
+	    display->max_tmds_clock <= 340000)
+		return false;
+
+	return true;
+}
+
 /*
  * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
  * - The Source shall suspend transmission of the TMDS clock and data
@@ -1055,7 +1080,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
 	unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
 
 	/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
-	if (hdmi->connector.display_info.hdmi.scdc.supported) {
+	if (dw_hdmi_support_scdc(hdmi)) {
 		if (mtmdsclock > HDMI14_MAX_TMDSCLK)
 			drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
 		else
@@ -1579,8 +1604,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
 	/* Set up HDMI_FC_INVIDCONF */
 	inv_val = (hdmi->hdmi_data.hdcp_enable ||
-		   vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
-		   hdmi_info->scdc.scrambling.low_rates ?
+		   (dw_hdmi_support_scdc(hdmi) &&
+		    (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+		     hdmi_info->scdc.scrambling.low_rates)) ?
 		   HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
 		   HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
@@ -1646,7 +1672,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	}
 
 	/* Scrambling Control */
-	if (hdmi_info->scdc.supported) {
+	if (dw_hdmi_support_scdc(hdmi)) {
 		if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
 		    hdmi_info->scdc.scrambling.low_rates) {
 			/*
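dw_hdmi_support_scdc() gathers every precondition for touching SCDC registers into one predicate that each call site consults. A standalone model of that guard style; the fields and thresholds below mirror the hunk but are illustrative, not the driver's types:

#include <stdbool.h>

struct sink_caps {
	bool scdc_supported;
	bool scrambling_supported;
	bool scrambling_low_rates;
	int max_tmds_clock_khz;
};

static bool supports_scrambling(int controller_version,
				const struct sink_caps *sink)
{
	if (controller_version < 0x200a)	/* controller predates SCDC */
		return false;
	if (!sink->scdc_supported || !sink->scrambling_supported)
		return false;
	/* sink capped at 340 MHz and unable to scramble at low rates */
	if (!sink->scrambling_low_rates && sink->max_tmds_clock_khz <= 340000)
		return false;
	return true;
}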
@@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 			funcs->atomic_disable(crtc, old_crtc_state);
 		else if (funcs->disable)
 			funcs->disable(crtc);
-		else
+		else if (funcs->dpms)
 			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
 		if (!(dev->irq_enabled && dev->num_crtcs))
@@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 		if (new_crtc_state->enable) {
 			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
 					 crtc->base.id, crtc->name);
-
 			if (funcs->atomic_enable)
 				funcs->atomic_enable(crtc, old_crtc_state);
-			else
+			else if (funcs->commit)
 				funcs->commit(crtc);
 		}
 	}
@@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_vgpu_primary_plane_format p;
 	struct intel_vgpu_cursor_plane_format c;
-	int ret;
+	int ret, tile_height = 1;
 
 	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
 		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,12 +228,15 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 			break;
 		case PLANE_CTL_TILED_X:
 			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+			tile_height = 8;
 			break;
 		case PLANE_CTL_TILED_Y:
 			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+			tile_height = 32;
 			break;
 		case PLANE_CTL_TILED_YF:
 			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+			tile_height = 32;
 			break;
 		default:
 			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
@@ -264,8 +267,8 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	info->size = (info->stride * info->height + PAGE_SIZE - 1)
-		     >> PAGE_SHIFT;
+	info->size = (info->stride * roundup(info->height, tile_height)
+		      + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (info->size == 0) {
 		gvt_vgpu_err("fb size is zero\n");
 		return -EINVAL;
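The plane-size fix rounds the height up to whole tile rows before multiplying by the stride. A standalone sketch of the arithmetic, numbers illustrative:

#include <stdio.h>

static unsigned long fb_pages(unsigned long stride, unsigned long height,
			      unsigned long tile_height, unsigned long page)
{
	unsigned long rows = (height + tile_height - 1) / tile_height;

	/* whole tile rows, then round the byte size up to full pages */
	return (stride * rows * tile_height + page - 1) / page;
}

int main(void)
{
	/* 4000-byte stride, 100 lines, Y-tile height 32, 4 KiB pages */
	printf("%lu\n", fb_pages(4000, 100, 32, 4096));	/* prints 125 */
	return 0;
}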
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 
 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 {
-	struct intel_vgpu_ppgtt_spt *spt;
+	struct intel_vgpu_ppgtt_spt *spt, *spn;
 	struct radix_tree_iter iter;
-	void **slot;
+	LIST_HEAD(all_spt);
+	void __rcu **slot;
 
+	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
 		spt = radix_tree_deref_slot(slot);
-		ppgtt_free_spt(spt);
+		list_move(&spt->post_shadow_list, &all_spt);
 	}
+	rcu_read_unlock();
+
+	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+		ppgtt_free_spt(spt);
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
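ppgtt_free_all_spt() now tears down in two phases: victims are moved to a private list under rcu_read_lock(), and only freed once the read-side critical section ends. A sketch of the same shape with invented types:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct entry {
	struct list_head link;
};

static void drain(struct list_head *live)
{
	struct entry *e, *next;
	LIST_HEAD(doomed);

	rcu_read_lock();
	list_for_each_entry_safe(e, next, live, link)
		list_move(&e->link, &doomed);	/* no freeing under rcu_read_lock() */
	rcu_read_unlock();

	list_for_each_entry_safe(e, next, &doomed, link)
		kfree(e);	/* heavier teardown is safe outside the read side */
}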
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 		void *buf, unsigned long count, bool is_write)
 {
-	void *aperture_va;
+	void __iomem *aperture_va;
 
 	if (!intel_vgpu_in_aperture(vgpu, off) ||
 	    !intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 		return -EIO;
 
 	if (is_write)
-		memcpy(aperture_va + offset_in_page(off), buf, count);
+		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
 	else
-		memcpy(buf, aperture_va + offset_in_page(off), count);
+		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
 
 	io_mapping_unmap(aperture_va);
 
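The kvmgt hunks replace plain memcpy() with the io accessors because the aperture mapping is __iomem and must not be dereferenced like ordinary memory. A minimal sketch, wrapper names invented:

#include <linux/io.h>

static void copy_to_aperture(void __iomem *dst, const void *src, size_t n)
{
	memcpy_toio(dst, src, n);	/* preserves MMIO access rules */
}

static void copy_from_aperture(void *dst, const void __iomem *src, size_t n)
{
	memcpy_fromio(dst, src, n);
}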
@@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
 	}
 }
 
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+				     struct intel_dsi *intel_dsi)
+{
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		WARN_ON(intel_dsi->io_wakeref[port]);
+		intel_dsi->io_wakeref[port] =
+			intel_display_power_get(dev_priv,
+						port == PORT_A ?
+						POWER_DOMAIN_PORT_DDI_A_IO :
+						POWER_DOMAIN_PORT_DDI_B_IO);
+	}
+}
+
 static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 		I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
 	}
 
-	for_each_dsi_port(port, intel_dsi->ports) {
-		intel_dsi->io_wakeref[port] =
-			intel_display_power_get(dev_priv,
-						port == PORT_A ?
-						POWER_DOMAIN_PORT_DDI_A_IO :
-						POWER_DOMAIN_PORT_DDI_B_IO);
-	}
+	get_dsi_io_power_domains(dev_priv, intel_dsi);
 }
 
 static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
 		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
 	}
 	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+	}
+	I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
 	POSTING_READ(DPCLKA_CFGCR0_ICL);
 
 	mutex_unlock(&dev_priv->dpll_lock);
@@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 			DRM_ERROR("DDI port:%c buffer not idle\n",
 				  port_name(port));
 	}
-	gen11_dsi_ungate_clocks(encoder);
+	gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
 	return 0;
 }
 
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
-				       struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+					struct intel_crtc_state *crtc_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	u64 domains = 0;
-	enum port port;
-
-	for_each_dsi_port(port, intel_dsi->ports)
-		if (port == PORT_A)
-			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
-		else
-			domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
-	return domains;
+	get_dsi_io_power_domains(to_i915(encoder->base.dev),
+				 enc_to_intel_dsi(&encoder->base));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 	       intel_aux_power_domain(dig_port);
 }
 
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
-				       struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+					struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port;
-	u64 domains;
 
 	/*
 	 * TODO: Add support for MST encoders. Atm, the following should never
@@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	 * hook.
 	 */
 	if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
-		return 0;
+		return;
 
 	dig_port = enc_to_dig_port(&encoder->base);
-	domains = BIT_ULL(dig_port->ddi_io_power_domain);
+	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
 	/*
 	 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	 */
 	if (intel_crtc_has_dp_encoder(crtc_state) ||
 	    intel_port_is_tc(dev_priv, encoder->port))
-		domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+		intel_display_power_get(dev_priv,
+					intel_ddi_main_link_aux_domain(dig_port));
 
 	/*
 	 * VDSC power is needed when DSC is enabled
 	 */
 	if (crtc_state->dsc_params.compression_enable)
-		domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
-	return domains;
+		intel_display_power_get(dev_priv,
+					intel_dsc_power_domain(crtc_state));
 }
 
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 			return;
 		}
 		/*
-		 * DSI ports should have their DDI clock ungated when disabled
-		 * and gated when enabled.
+		 * For DSI we keep the ddi clocks gated
+		 * except during enable/disable sequence.
 		 */
-		ddi_clk_needed = !encoder->base.crtc;
+		ddi_clk_needed = false;
 	}
 
 	val = I915_READ(DPCLKA_CFGCR0_ICL);
@@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 	struct intel_encoder *encoder;
 
 	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		u64 get_domains;
-		enum intel_display_power_domain domain;
 		struct intel_crtc_state *crtc_state;
 
 		if (!encoder->get_power_domains)
@@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 			continue;
 
 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
-		get_domains = encoder->get_power_domains(encoder, crtc_state);
-		for_each_power_domain(domain, get_domains)
-			intel_display_power_get(dev_priv, domain);
+		encoder->get_power_domains(encoder, crtc_state);
 	}
 }
@@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 	return -EINVAL;
 }
 
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
-				  struct intel_crtc_state *pipe_config,
-				  const struct link_config_limits *limits)
-{
-	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int bpp, clock, lane_count;
-	int mode_rate, link_clock, link_avail;
-
-	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
-		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-						   bpp);
-
-		for (lane_count = limits->min_lane_count;
-		     lane_count <= limits->max_lane_count;
-		     lane_count <<= 1) {
-			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
-				link_clock = intel_dp->common_rates[clock];
-				link_avail = intel_dp_max_data_rate(link_clock,
-								    lane_count);
-
-				if (mode_rate <= link_avail) {
-					pipe_config->lane_count = lane_count;
-					pipe_config->pipe_bpp = bpp;
-					pipe_config->port_clock = link_clock;
-
-					return 0;
-				}
-			}
-		}
-	}
-
-	return -EINVAL;
-}
-
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 {
 	int i, num_bpc;
@@ -2031,15 +1995,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 	limits.min_bpp = 6 * 3;
 	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+	if (intel_dp_is_edp(intel_dp)) {
 		/*
 		 * Use the maximum clock and number of lanes the eDP panel
-		 * advertizes being capable of. The eDP 1.3 and earlier panels
-		 * are generally designed to support only a single clock and
-		 * lane configuration, and typically these values correspond to
-		 * the native resolution of the panel. With eDP 1.4 rate select
-		 * and DSC, this is decreasingly the case, and we need to be
-		 * able to select less than maximum link config.
+		 * advertizes being capable of. The panels are generally
+		 * designed to support only a single clock and lane
+		 * configuration, and typically these values correspond to the
+		 * native resolution of the panel.
 		 */
 		limits.min_lane_count = limits.max_lane_count;
 		limits.min_clock = limits.max_clock;
@@ -2053,22 +2015,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 		      intel_dp->common_rates[limits.max_clock],
 		      limits.max_bpp, adjusted_mode->crtc_clock);
 
-	if (intel_dp_is_edp(intel_dp))
-		/*
-		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
-		 * section A.1: "It is recommended that the minimum number of
-		 * lanes be used, using the minimum link rate allowed for that
-		 * lane configuration."
-		 *
-		 * Note that we use the max clock and lane count for eDP 1.3 and
-		 * earlier, and fast vs. wide is irrelevant.
-		 */
-		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
-							&limits);
-	else
-		/* Optimize for slow and wide. */
-		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
-							&limits);
+	/*
+	 * Optimize for slow and wide. This is the place to add alternative
+	 * optimization policy.
+	 */
+	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
 	/* enable compression if the mode doesn't fit available BW */
 	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
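With the eDP fast-and-narrow path gone, every DP link is computed slow and wide: the lowest link clock that carries the mode wins, and lanes are added before the clock is raised. A standalone model of that search order, rates in kHz and purely illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool pick_slow_and_wide(long required, const long *rates, int nrates,
			       int max_lanes, long *clock, int *lanes)
{
	for (int r = 0; r < nrates; r++)		/* slowest clock first */
		for (int l = 1; l <= max_lanes; l <<= 1)
			if (rates[r] * l >= required) {
				*clock = rates[r];
				*lanes = l;
				return true;
			}
	return false;
}

int main(void)
{
	const long rates[] = { 162000, 270000, 540000 };
	long clock;
	int lanes;

	if (pick_slow_and_wide(800000, rates, 3, 4, &clock, &lanes))
		printf("%ld kHz x %d lanes\n", clock, lanes);	/* 270000 kHz x 4 */
	return 0;
}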
@@ -270,10 +270,12 @@ struct intel_encoder {
 	 * be set correctly before calling this function. */
 	void (*get_config)(struct intel_encoder *,
 			   struct intel_crtc_state *pipe_config);
-	/* Returns a mask of power domains that need to be referenced as part
-	 * of the hardware state readout code. */
-	u64 (*get_power_domains)(struct intel_encoder *encoder,
-				 struct intel_crtc_state *crtc_state);
+	/*
+	 * Acquires the power domains needed for an active encoder during
+	 * hardware state readout.
+	 */
+	void (*get_power_domains)(struct intel_encoder *encoder,
+				  struct intel_crtc_state *crtc_state);
 	/*
 	 * Called during system suspend after all pending requests for the
 	 * encoder are flushed (for example for DP AUX transactions) and
@@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 tmp;
+
+	tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+	case PIPEMISC_DITHER_6_BPC:
+		return 18;
+	case PIPEMISC_DITHER_8_BPC:
+		return 24;
+	case PIPEMISC_DITHER_10_BPC:
+		return 30;
+	case PIPEMISC_DITHER_12_BPC:
+		return 36;
+	default:
+		MISSING_CASE(tmp);
+		return 0;
+	}
+}
+
 static int intel_dsi_compute_config(struct intel_encoder *encoder,
 				    struct intel_crtc_state *pipe_config,
 				    struct drm_connector_state *conn_state)
@@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 	bpp = mipi_dsi_pixel_format_to_bpp(
 			pixel_format_from_register_bits(fmt));
 
+	pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
 	/* Enable Frame time stamo based scanline reporting */
 	adjusted_mode->private_flags |=
 		I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock)
 static unsigned int mt2701_calculate_factor(int clock)
 {
 	if (clock <= 64000)
-		return 16;
-	else if (clock <= 128000)
-		return 8;
-	else if (clock <= 256000)
 		return 4;
-	else
+	else if (clock <= 128000)
 		return 2;
+	else
+		return 1;
 }
 
 static const struct mtk_dpi_conf mt8173_conf = {
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
@@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = {
 	.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
 	.gem_prime_mmap = mtk_drm_gem_mmap_buf,
+	.gem_prime_vmap = mtk_drm_gem_prime_vmap,
+	.gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
 	.fops = &mtk_drm_fops,
 
 	.name = DRIVER_NAME,
@@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev)
 	if (ret < 0)
 		goto err_deinit;
 
+	ret = drm_fbdev_generic_setup(drm, 32);
+	if (ret)
+		DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
 	return 0;
 
 err_deinit:
@@ -241,3 +241,49 @@ err_gem_free:
 	kfree(mtk_gem);
 	return ERR_PTR(ret);
 }
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+	struct sg_table *sgt;
+	struct sg_page_iter iter;
+	unsigned int npages;
+	unsigned int i = 0;
+
+	if (mtk_gem->kvaddr)
+		return mtk_gem->kvaddr;
+
+	sgt = mtk_gem_prime_get_sg_table(obj);
+	if (IS_ERR(sgt))
+		return NULL;
+
+	npages = obj->size >> PAGE_SHIFT;
+	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+	if (!mtk_gem->pages)
+		goto out;
+
+	for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+		mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+		if (i > npages)
+			break;
+	}
+	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+			       pgprot_writecombine(PAGE_KERNEL));
+
+out:
+	kfree((void *)sgt);
+
+	return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+	if (!mtk_gem->pages)
+		return;
+
+	vunmap(vaddr);
+	mtk_gem->kvaddr = 0;
+	kfree((void *)mtk_gem->pages);
+}
@@ -37,6 +37,7 @@ struct mtk_drm_gem_obj {
 	dma_addr_t		dma_addr;
 	unsigned long		dma_attrs;
 	struct sg_table		*sg;
+	struct page		**pages;
 };
 
 #define to_mtk_gem_obj(x)	container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
 struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
 			struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 
 #endif
@@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
 	if (IS_ERR(regmap))
 		ret = PTR_ERR(regmap);
 	if (ret) {
-		ret = PTR_ERR(regmap);
 		dev_err(dev,
 			"Failed to get system configuration registers: %d\n",
 			ret);
@@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
 	of_node_put(remote);
 
 	hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+	of_node_put(i2c_np);
 	if (!hdmi->ddc_adpt) {
 		dev_err(dev, "Failed to get ddc i2c adapter by node\n");
 		return -EINVAL;
@@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
 	.owner = THIS_MODULE,
 };
 
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-			     unsigned long *parent_rate)
-{
-	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-	hdmi_phy->pll_rate = rate;
-	if (rate <= 74250000)
-		*parent_rate = rate;
-	else
-		*parent_rate = rate / 2;
-
-	return rate;
-}
-
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-				       unsigned long parent_rate)
-{
-	struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-	return hdmi_phy->pll_rate;
-}
-
 void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
 			     u32 bits)
 {
@@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
 	return NULL;
 }
 
-static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
-				     const struct clk_ops **ops)
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+				      struct clk_init_data *clk_init)
 {
-	if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
-		*ops = hdmi_phy->conf->hdmi_phy_clk_ops;
-	else
-		dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+	clk_init->flags = hdmi_phy->conf->flags;
+	clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
 }
 
 static int mtk_hdmi_phy_probe(struct platform_device *pdev)
@@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
 	struct clk_init_data clk_init = {
 		.num_parents = 1,
 		.parent_names = (const char * const *)&ref_clk_name,
-		.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
 	};
 
 	struct phy *phy;
@@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
 	hdmi_phy->dev = dev;
 	hdmi_phy->conf =
 		(struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
-	mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+	mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
 	hdmi_phy->pll_hw.init = &clk_init;
 	hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
 	if (IS_ERR(hdmi_phy->pll)) {
@@ -21,6 +21,7 @@ struct mtk_hdmi_phy;
 
 struct mtk_hdmi_phy_conf {
 	bool tz_disabled;
+	unsigned long flags;
 	const struct clk_ops *hdmi_phy_clk_ops;
 	void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
 	void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
 void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
 		       u32 val, u32 mask);
 struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-			     unsigned long *parent_rate);
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-				       unsigned long parent_rate);
 
 extern struct platform_driver mtk_hdmi_phy_driver;
 extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
@@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
         usleep_range(80, 100);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
         usleep_range(80, 100);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
         usleep_range(80, 100);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long *parent_rate)
+{
+        return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long parent_rate)
 {
@@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 
         if (rate <= 64000000)
                 pos_div = 3;
-        else if (rate <= 12800000)
-                pos_div = 1;
+        else if (rate <= 128000000)
+                pos_div = 2;
         else
                 pos_div = 1;
 
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
         mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
                           RG_HTPLL_IC_MASK);
         mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
@@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
         return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                              unsigned long parent_rate)
+{
+        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+        unsigned long out_rate, val;
+
+        val = (readl(hdmi_phy->regs + HDMI_CON6)
+               & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+        switch (val) {
+        case 0x00:
+                out_rate = parent_rate;
+                break;
+        case 0x01:
+                out_rate = parent_rate / 2;
+                break;
+        default:
+                out_rate = parent_rate / 4;
+                break;
+        }
+
+        val = (readl(hdmi_phy->regs + HDMI_CON6)
+               & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+        out_rate *= (val + 1) * 2;
+        val = (readl(hdmi_phy->regs + HDMI_CON2)
+               & RG_HDMITX_TX_POSDIV_MASK);
+        out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+        if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+                out_rate /= 5;
+
+        return out_rate;
+}
+
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
         .prepare = mtk_hdmi_pll_prepare,
         .unprepare = mtk_hdmi_pll_unprepare,
@@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
         usleep_range(80, 100);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
         mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
         usleep_range(80, 100);
         mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
         .tz_disabled = true,
+        .flags = CLK_SET_RATE_GATE,
         .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
         .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
         .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
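The recalc_rate() added above just decodes the MT2701 divider chain from the registers: a /1, /2 or /4 pre-divider, a doubled feedback multiplier, a power-of-two TX post-divider, and a fixed /5 when the post-divider is enabled. The sketch below reproduces that arithmetic with the register reads replaced by plain parameters; the example input values are invented, not read from hardware.

    #include <stdio.h>

    static unsigned long long recalc(unsigned long long parent_rate,
                                     unsigned int prediv,    /* 0:/1 1:/2 else:/4 */
                                     unsigned int fbkdiv,
                                     unsigned int tx_posdiv, /* shift, 0..2 */
                                     int tx_posdiv_en)
    {
            unsigned long long out_rate;

            if (prediv == 0)
                    out_rate = parent_rate;
            else if (prediv == 1)
                    out_rate = parent_rate / 2;
            else
                    out_rate = parent_rate / 4;

            out_rate *= (fbkdiv + 1) * 2;   /* feedback divider, doubled */
            out_rate >>= tx_posdiv;         /* TX post-divider */
            if (tx_posdiv_en)
                    out_rate /= 5;          /* fixed /5 when POSDIV enabled */
            return out_rate;
    }

    int main(void)
    {
            /* 26 MHz reference, fbkdiv=63, posdiv shift 2, /5 enabled */
            printf("%llu Hz\n", recalc(26000000ULL, 0, 63, 2, 1)); /* 166400000 */
            return 0;
    }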
@@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
         usleep_range(100, 150);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long *parent_rate)
+{
+        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+        hdmi_phy->pll_rate = rate;
+        if (rate <= 74250000)
+                *parent_rate = rate;
+        else
+                *parent_rate = rate / 2;
+
+        return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long parent_rate)
 {
@@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
         return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                              unsigned long parent_rate)
+{
+        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+        return hdmi_phy->pll_rate;
+}
+
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
         .prepare = mtk_hdmi_pll_prepare,
         .unprepare = mtk_hdmi_pll_unprepare,
@@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 }
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+        .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
         .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
         .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
         .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                 REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
                 hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
                 hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+                REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
                 hdmi4_core_disable(core);
                 return 0;
         }
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
         if (err)
                 return err;
 
+        /*
+         * Initialize CEC clock divider: CEC needs 2MHz clock hence
+         * set the divider to 24 to get 48/24=2MHz clock
+         */
+        REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
         /* Clear TX FIFO */
         if (!hdmi_cec_clear_tx_fifo(adap)) {
                 pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
-                return -EIO;
+                err = -EIO;
+                goto err_disable_clk;
         }
 
         /* Clear RX FIFO */
         if (!hdmi_cec_clear_rx_fifo(adap)) {
                 pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
-                return -EIO;
+                err = -EIO;
+                goto err_disable_clk;
         }
 
         /* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                 hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
         }
         return 0;
+
+err_disable_clk:
+        REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+        hdmi4_core_disable(core);
+
+        return err;
 }
 
 static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
                 return ret;
         core->wp = wp;
 
-        /*
-         * Initialize CEC clock divider: CEC needs 2MHz clock hence
-         * set the devider to 24 to get 48/24=2MHz clock
-         */
-        REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+        /* Disable clock initially, hdmi_cec_adap_enable() manages it */
+        REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
 
         ret = cec_register_adapter(core->adap, &pdev->dev);
         if (ret < 0) {
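The divider value moved around by the hunks above is easy to sanity-check: 0x18 in the 5:0 field of HDMI_WP_CLK is 24 decimal, and the 48 MHz functional clock divided by 24 gives the 2 MHz the CEC block needs. A trivial check, using only the numbers from the commit's own comment, nothing touching hardware:

    #include <stdio.h>

    int main(void)
    {
            unsigned int fclk_hz = 48000000;
            unsigned int divider = 0x18;    /* 24 decimal */

            printf("CEC clock: %u Hz\n", fclk_hz / divider); /* 2000000 */
            return 0;
    }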
@@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
         else
                 acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
         /*
-         * The I2S input word length is twice the lenght given in the IEC-60958
+         * The I2S input word length is twice the length given in the IEC-60958
          * status word. If the word size is greater than
          * 20 bits, increment by one.
          */
@@ -48,8 +48,13 @@ static enum drm_mode_status
 sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
                             const struct drm_display_mode *mode)
 {
-        /* This is max for HDMI 2.0b (4K@60Hz) */
-        if (mode->clock > 594000)
+        /*
+         * Controller supports a maximum of 594 MHz, which correlates to
+         * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
+         * 340 MHz scrambling has to be enabled. Because scrambling is
+         * not yet implemented, just limit to 340 MHz for now.
+         */
+        if (mode->clock > 340000)
                 return MODE_CLOCK_HIGH;
 
         return MODE_OK;
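Since drm_display_mode.clock is in kHz, the new limit rejects exactly the modes that would need scrambling. A minimal sketch of the same filter logic, with made-up sample clocks rather than real DRM modes:

    #include <stdio.h>

    /* mode->clock is in kHz; above 340 MHz HDMI 2.0 requires scrambling */
    static const char *mode_valid(int clock_khz)
    {
            return clock_khz > 340000 ? "MODE_CLOCK_HIGH" : "MODE_OK";
    }

    int main(void)
    {
            printf("1080p@60 (148500) -> %s\n", mode_valid(148500));
            printf("4K@30   (297000) -> %s\n", mode_valid(297000));
            printf("4K@60   (594000) -> %s\n", mode_valid(594000));
            return 0;
    }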
@@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 
 err_unregister_gates:
         for (i = 0; i < CLK_NUM; i++)
-                if (clk_data->hws[i])
+                if (!IS_ERR_OR_NULL(clk_data->hws[i]))
                         clk_hw_unregister_gate(clk_data->hws[i]);
         clk_disable_unprepare(tcon_top->bus);
 err_assert_reset:
@@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
 
         of_clk_del_provider(dev->of_node);
         for (i = 0; i < CLK_NUM; i++)
-                clk_hw_unregister_gate(clk_data->hws[i]);
+                if (clk_data->hws[i])
+                        clk_hw_unregister_gate(clk_data->hws[i]);
 
         clk_disable_unprepare(tcon_top->bus);
         reset_control_assert(tcon_top->rst);
@@ -52,6 +52,7 @@ static struct drm_driver driver = {
         .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
         .load = udl_driver_load,
         .unload = udl_driver_unload,
+        .release = udl_driver_release,
 
         /* gem hooks */
         .gem_free_object_unlocked = udl_gem_free_object,
@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
 
 int udl_driver_load(struct drm_device *dev, unsigned long flags);
 void udl_driver_unload(struct drm_device *dev);
+void udl_driver_release(struct drm_device *dev);
 
 int udl_fbdev_init(struct drm_device *dev);
 void udl_fbdev_cleanup(struct drm_device *dev);
@@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev)
                 udl_free_urb_list(dev);
 
         udl_fbdev_cleanup(dev);
-        udl_modeset_cleanup(dev);
         kfree(udl);
 }
+
+void udl_driver_release(struct drm_device *dev)
+{
+        udl_modeset_cleanup(dev);
+        drm_dev_fini(dev);
+        kfree(dev);
+}
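The udl hunks above move modeset teardown from unload into a release callback, so it only runs once the last reference to the device is gone. A generic refcount-and-release sketch of that shape, not the DRM API itself:

    #include <stdio.h>

    struct dev {
            int refs;
            void (*release)(struct dev *);
    };

    static void dev_put(struct dev *d)
    {
            if (--d->refs == 0)
                    d->release(d);  /* final cleanup deferred to last put */
    }

    static void my_release(struct dev *d)
    {
            (void)d;
            printf("released\n");
    }

    int main(void)
    {
            struct dev d = { .refs = 2, .release = my_release };

            dev_put(&d);    /* unload path drops its reference */
            dev_put(&d);    /* last user drops theirs: release runs */
            return 0;
    }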
@@ -114,7 +114,7 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
 
 static void host1x_channel_set_streamid(struct host1x_channel *channel)
 {
-#if HOST1X_HW >= 6
+#if IS_ENABLED(CONFIG_IOMMU_API) && HOST1X_HW >= 6
         struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
         u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
 
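The guard added above makes the stream-ID programming drop out at compile time when the IOMMU API is not built in, since dev_iommu_fwspec_get() is only meaningful then. A simplified stand-in below shows the mechanism; the kernel's real IS_ENABLED() is more elaborate (it also matches =m), and the values here are chosen just to exercise the branch:

    #include <stdio.h>

    #define CONFIG_IOMMU_API 1
    #define HOST1X_HW 6
    #define IS_ENABLED(x) (x)   /* simplified stand-in for the kernel macro */

    int main(void)
    {
    #if IS_ENABLED(CONFIG_IOMMU_API) && HOST1X_HW >= 6
            printf("stream ID programming compiled in\n");
    #else
            printf("stream ID programming compiled out\n");
    #endif
            return 0;
    }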
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
         int total_contexts;
         int ret;
         unsigned ngroups;
-        int qos_rmt_count;
+        int rmt_count;
         int user_rmt_reduced;
         u32 n_usr_ctxts;
         u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
                 n_usr_ctxts = rcv_contexts - total_contexts;
         }
 
-        /* each user context requires an entry in the RMT */
-        qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
-        if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
-                user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+        /*
+         * The RMT entries are currently allocated as shown below:
+         * 1. QOS (0 to 128 entries);
+         * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+         * 3. VNIC (num_vnic_contexts).
+         * It should be noted that PSM FECN oversubscribes num_vnic_contexts
+         * entries of RMT because both VNIC and PSM could allocate any receive
+         * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+         * and PSM FECN must reserve an RMT entry for each possible PSM receive
+         * context.
+         */
+        rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+        if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+                user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
                 dd_dev_err(dd,
                            "RMT size is reducing the number of user receive contexts from %u to %d\n",
                            n_usr_ctxts,
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
         u64 reg;
         int i, idx, regoff, regidx;
         u8 offset;
+        u32 total_cnt;
 
         /* there needs to be enough room in the map table */
-        if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+        total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+        if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
                 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
                 return;
         }
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
         /* add rule 1 */
         add_rsm_rule(dd, RSM_INS_FECN, &rrd);
 
-        rmt->used += dd->num_user_contexts;
+        rmt->used += total_cnt;
 }
 
 /* Initialize RSM for VNIC */
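The budget logic the hunk above changes can be checked with back-of-the-envelope numbers: the old code reserved only the QOS entries before comparing against the map size, while the fix also reserves 2 * num_vnic_contexts for PSM FECN plus VNIC, which can push the total over the limit and trigger the user-context reduction path. All counts below are made up for illustration, including the table size:

    #include <stdio.h>

    #define NUM_MAP_ENTRIES 256     /* illustrative RSM map size */

    int main(void)
    {
            int qos_entries = 128;          /* worst case per the comment */
            int num_vnic_contexts = 8;
            int n_usr_ctxts = 160;
            int rmt_count = qos_entries + 2 * num_vnic_contexts; /* 144 */

            if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES)
                    printf("reduce users to %d\n", NUM_MAP_ENTRIES - rmt_count);
            else
                    printf("fits\n");
            return 0;
    }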
@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
         if (!list_empty(&priv->s_iowait.list) &&
             !(qp->s_flags & RVT_S_BUSY) &&
             !(priv->s_flags & RVT_S_BUSY)) {
-                qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+                qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+                iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+                iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
                 list_del_init(&priv->s_iowait.list);
                 priv->s_iowait.lock = NULL;
                 rvt_put_qp(qp);
@@ -3088,7 +3088,7 @@ send_last:
                         update_ack_queue(qp, next);
                 }
                 e = &qp->s_ack_queue[qp->r_head_ack_queue];
-                if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+                if (e->rdma_sge.mr) {
                         rvt_put_mr(e->rdma_sge.mr);
                         e->rdma_sge.mr = NULL;
                 }
@@ -3166,7 +3166,7 @@ send_last:
                         update_ack_queue(qp, next);
                 }
                 e = &qp->s_ack_queue[qp->r_head_ack_queue];
-                if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+                if (e->rdma_sge.mr) {
                         rvt_put_mr(e->rdma_sge.mr);
                         e->rdma_sge.mr = NULL;
                 }
@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
             make_tid_rdma_ack(qp, ohdr, ps))
                 return 1;
 
-        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
-                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
-                        goto bail;
-                /* We are in the error state, flush the work request. */
-                if (qp->s_last == READ_ONCE(qp->s_head))
-                        goto bail;
-                /* If DMAs are in progress, we can't flush immediately. */
-                if (iowait_sdma_pending(&priv->s_iowait)) {
-                        qp->s_flags |= RVT_S_WAIT_DMA;
-                        goto bail;
-                }
-                clear_ahg(qp);
-                wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-                hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
-                                         IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
-                /* will get called again */
-                goto done_free_tx;
-        }
+        /*
+         * Bail out if we can't send data.
+         * Be reminded that this check must be done after the call to
+         * make_tid_rdma_ack() because the responding QP could be in
+         * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+         */
+        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+                goto bail;
 
         if (priv->s_flags & RVT_S_WAIT_ACK)
                 goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
         hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
                              middle, ps);
         return 1;
-done_free_tx:
-        hfi1_put_txreq(ps->s_txreq);
-        ps->s_txreq = NULL;
-        return 1;
-
 bail:
         hfi1_put_txreq(ps->s_txreq);
 bail_no_tx:
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                 idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
                 dma_offset = offset = idx_offset * table->obj_size;
         } else {
+                u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
                 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
                 /* mtt mhop */
                 i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                         hem_idx = i;
 
                 hem = table->hem[hem_idx];
-                dma_offset = offset = (obj & (table->num_obj - 1)) *
-                                      table->obj_size % mhop.bt_chunk_size;
+                dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+                                      mhop.bt_chunk_size;
                 if (mhop.hop_num == 2)
                         dma_offset = offset = 0;
         }
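The fix above swaps table->obj_size for the segment stride: each segment holds 8 base addresses of 8 bytes each, so an object advances 64 bytes within the BT chunk. A quick check of that offset arithmetic, with the chunk size and object index invented for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int seg_size = 8 * 8;      /* 8 bytes per BA, 8 BAs */
            unsigned int bt_chunk_size = 4096;  /* illustrative chunk size */
            unsigned int obj = 100;

            /* offset of obj within its chunk, as in the fixed code path */
            printf("offset = %u\n", obj * seg_size % bt_chunk_size); /* 2304 */
            return 0;
    }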
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
         struct hns_roce_hem_table *table;
         dma_addr_t dma_handle;
         __le64 *mtts;
-        u32 s = start_index * sizeof(u64);
         u32 bt_page_size;
         u32 i;
 
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
                 return -EINVAL;
 
         mtts = hns_roce_table_find(hr_dev, table,
                                    mtt->first_seg +
-                                   s / hr_dev->caps.mtt_entry_sz,
+                                   start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
                                    &dma_handle);
         if (!mtts)
                 return -ENOMEM;
Some files were not shown because too many files have changed in this diff.