LoongArch: Add process management
Add process management support for LoongArch, including: thread info
definition, context switch and process tracing.

Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
chenhuacai committed on Jun 3, 2022
Commit 803b0fc (1 parent: 0603839)
Showing 14 changed files with 1,865 additions and 0 deletions.
129 changes: 129 additions & 0 deletions arch/loongarch/include/asm/fpu.h
@@ -0,0 +1,129 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_FPU_H
#define _ASM_FPU_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/current.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

struct sigcontext;

extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);

/*
* Mask the FCSR Cause bits according to the Enable bits, observing
* that Unimplemented is always enabled.
*/
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}
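/*
 * Illustrative note (not part of this commit): with the usual layout
 * where FPU_CSR_ALL_E is the low Enable field and FPU_CSR_ALL_X the
 * higher Cause field -- say Enables at bits 0..4 and Cause at bits
 * 24..28 -- the shift distance is ffs(BIT(24)) - ffs(BIT(0)) = 25 - 1
 * = 24, so each Enable bit lines up with its matching Cause bit before
 * the AND above.
 */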

static inline int is_fp_enabled(void)
{
return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
1 : 0;
}

#define enable_fpu() set_csr_euen(CSR_EUEN_FPEN)

#define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN)

#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)

static inline int is_fpu_owner(void)
{
return test_thread_flag(TIF_USEDFPU);
}

static inline void __own_fpu(void)
{
enable_fpu();
set_thread_flag(TIF_USEDFPU);
KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}

static inline void own_fpu_inatomic(int restore)
{
if (cpu_has_fpu && !is_fpu_owner()) {
__own_fpu();
if (restore)
_restore_fp(&current->thread.fpu);
}
}

static inline void own_fpu(int restore)
{
preempt_disable();
own_fpu_inatomic(restore);
preempt_enable();
}

static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
if (is_fpu_owner()) {
if (save)
_save_fp(&tsk->thread.fpu);
disable_fpu();
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
}
KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}

static inline void lose_fpu(int save)
{
preempt_disable();
lose_fpu_inatomic(save, current);
preempt_enable();
}

static inline void init_fpu(void)
{
unsigned int fcsr = current->thread.fpu.fcsr;

__own_fpu();
_init_fpu(fcsr);
set_used_math();
}

static inline void save_fp(struct task_struct *tsk)
{
if (cpu_has_fpu)
_save_fp(&tsk->thread.fpu);
}

static inline void restore_fp(struct task_struct *tsk)
{
if (cpu_has_fpu)
_restore_fp(&tsk->thread.fpu);
}

static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
if (tsk == current) {
preempt_disable();
if (is_fpu_owner())
_save_fp(&current->thread.fpu);
preempt_enable();
}

return tsk->thread.fpu.fpr;
}

#endif /* _ASM_FPU_H */
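The helpers above implement lazy FPU handling: the FPU is only enabled once a task actually touches it, and the live register state is written back into thread.fpu when ownership is dropped. A minimal sketch of how a "floating-point disabled" exception handler might use this API; the handler name and control flow are assumptions for illustration, not code from this commit:

/*
 * Sketch only (assumed handler, not part of this commit): first FP use
 * initialises a clean FPU context; later uses just re-enable the FPU
 * and reload the state previously saved by lose_fpu()/_save_fp().
 */
#include <linux/sched.h>
#include <asm/fpu.h>

static void fpu_disabled_exception(struct pt_regs *regs)
{
	preempt_disable();
	if (!used_math())
		init_fpu();		/* first use: clean regs, default FCSR */
	else
		own_fpu_inatomic(1);	/* re-enable and restore saved context */
	preempt_enable();
}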
9 changes: 9 additions & 0 deletions arch/loongarch/include/asm/idle.h
@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IDLE_H
#define __ASM_IDLE_H

#include <linux/linkage.h>

extern asmlinkage void __arch_cpu_idle(void);

#endif /* __ASM_IDLE_H */
16 changes: 16 additions & 0 deletions arch/loongarch/include/asm/mmu.h
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <linux/atomic.h>
#include <linux/spinlock.h>

typedef struct {
u64 asid[NR_CPUS];
void *vdso;
} mm_context_t;

#endif /* __ASM_MMU_H */
152 changes: 152 additions & 0 deletions arch/loongarch/include/asm/mmu_context.h
@@ -0,0 +1,152 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Switch a MMU context.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

/*
* The upper bits that the hardware does not use are treated as
* a software ASID extension (a version number).
*/
static inline u64 asid_version_mask(unsigned int cpu)
{
return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
}

static inline u64 asid_first_version(unsigned int cpu)
{
return cpu_asid_mask(&cpu_data[cpu]) + 1;
}

#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
{
if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
return 0;

return 1;
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Normal, classic get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
u64 asid = asid_cache(cpu);

if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
local_flush_tlb_user(); /* start new asid cycle */

cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;

for_each_possible_cpu(i)
cpu_context(i, mm) = 0;

return 0;
}

static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();

/* Check if our ASID is of an older version and thus invalid */
if (!asid_valid(next, cpu))
get_new_mmu_context(next, cpu);

write_csr_asid(cpu_asid(cpu, next));

if (next != &init_mm)
csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
else
csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
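
/*
 * Illustrative note (not from this commit): PGDL is the page-table
 * base for the lower (user) half of the address space. Kernel threads
 * run on init_mm, which has no user mappings, so PGDL is pointed at
 * invalid_pg_dir to catch stray user-space accesses.
 */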

/*
* Record that this CPU now uses "next", so IPI TLB flush
* routines know which CPUs to target.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
}

#define switch_mm_irqs_off switch_mm_irqs_off

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long flags;

local_irq_save(flags);
switch_mm_irqs_off(prev, next, tsk);
local_irq_restore(flags);
}

/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
}

#define activate_mm(prev, next) switch_mm(prev, next, current)
#define deactivate_mm(task, mm) do { } while (0)

/*
* If mm is currently active, we can't really drop it.
* Instead, we will get a new one for it.
*/
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
int asid;
unsigned long flags;

local_irq_save(flags);

asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);

if (asid == cpu_asid(cpu, mm)) {
if (!current->mm || (current->mm == mm)) {
get_new_mmu_context(mm, cpu);
write_csr_asid(cpu_asid(cpu, mm));
goto out;
}
}

/* Will get a new context next time */
cpu_context(cpu, mm) = 0;
cpumask_clear_cpu(cpu, mm_cpumask(mm));
out:
local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */
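The ASID handling above treats the per-CPU 64-bit asid_cache as "version bits | hardware ASID bits": asid_valid() compares the version bits, and get_new_mmu_context() bumps the counter and flushes the TLB when the hardware field wraps. A stand-alone user-space model of that scheme; the 10-bit ASID width and all names below are assumptions for illustration (the real width comes from cpu_asid_mask()):

/* Stand-alone model of the versioned-ASID scheme; illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define ASID_MASK	0x3ffULL	/* assumed 10-bit hardware ASID field */

static uint64_t asid_cache = ASID_MASK + 1;	/* like asid_first_version() */

/* Mirrors get_new_mmu_context(): bump the counter, flush on wrap. */
static uint64_t get_new_context(void)
{
	if (!(++asid_cache & ASID_MASK))
		printf("ASID space exhausted: flush TLB, start new version\n");
	return asid_cache;
}

/* Mirrors asid_valid(): a context is stale if its version bits differ. */
static int context_valid(uint64_t mm_context)
{
	return !((mm_context ^ asid_cache) & ~ASID_MASK);
}

int main(void)
{
	uint64_t mm_a = get_new_context();	/* version 1, ASID 1 */

	printf("mm_a context = %#llx, valid = %d\n",
	       (unsigned long long)mm_a, context_valid(mm_a));

	/* Exhaust the remaining ASIDs so the version rolls over. */
	for (int i = 0; i < (int)ASID_MASK; i++)
		get_new_context();

	/* mm_a now belongs to an older version and must be reallocated. */
	printf("after rollover, mm_a valid = %d\n", context_valid(mm_a));
	return 0;
}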
