- Feb 10, 2017
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Jan 30, 2017
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.9.6-rt4 patch set. Changes since v4.9.6-rt3: - Since the timer(s)-softirq split we could delay the wakeup of the timer softirq. Patch by Mike Galbraith. - The CPUSET code may be called with disabled interrupts and requires raw locks for it to succeed. Patch by Mike Galbraith. - The workaround for the radix preload code was not perfect and could cause failure if the code relied on it. Reported by Mike Galbraith. - Qualcomm's pinctrl driver got raw locks. Reported by Brian Wrenn, patched by Julia Cartwright. - On X86, setting / changing page attributes could lead to an expensive (in terms of latency) but efficient (in terms of runtime) cache flush. In order not to hurt the latency, the expensive cache flush has been disabled. Patch by John Ogness. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against v4.9.6-rt3 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/incr/patch-4.9.6-rt3-rt4.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.9.6-rt4 The RT patch against v4.9.6 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patch-4.9.6-rt4.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.6-rt4.tar.xz Sebastian diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache, int in_flags, struct page **pages) { unsigned int i, level; +#ifdef CONFIG_PREEMPT + /* + * Avoid wbinvd() because it causes latencies on all CPUs, + * regardless of any CPU isolation that may be in effect.
+ */ + unsigned long do_wbinvd = 0; +#else unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ +#endif BUG_ON(irqs_disabled()); diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -61,7 +61,7 @@ struct msm_pinctrl { struct notifier_block restart_nb; int irq; - spinlock_t lock; + raw_spinlock_t lock; DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO); @@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, if (WARN_ON(i == g->nfuncs)) return -EINVAL; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->ctl_reg); val &= ~mask; val |= i << g->mux_bit; writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_OUTPUT: /* set output value */ - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->io_reg); if (arg) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); writel(val, pctrl->regs + g->io_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); /* enable output */ arg = 1; @@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, return -EINVAL; } - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->ctl_reg); val &= ~(mask << bit); val |= arg << bit; writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } return 0; @@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) g = &pctrl->soc->groups[offset]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->ctl_reg); val &= ~BIT(g->oe_bit); writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in g = &pctrl->soc->groups[offset]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->io_reg); if (value) @@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in val |= BIT(g->oe_bit); writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) g = &pctrl->soc->groups[offset]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->io_reg); if (value) @@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) val &= ~BIT(g->out_bit); writel(val, pctrl->regs + g->io_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } #ifdef CONFIG_DEBUG_FS @@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + 
raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_cfg_reg); val &= ~BIT(g->intr_enable_bit); @@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) clear_bit(d->hwirq, pctrl->enabled_irqs); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static void msm_gpio_irq_unmask(struct irq_data *d) @@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_status_reg); val &= ~BIT(g->intr_status_bit); @@ -604,7 +604,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) set_bit(d->hwirq, pctrl->enabled_irqs); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static void msm_gpio_irq_ack(struct irq_data *d) @@ -617,7 +617,7 @@ static void msm_gpio_irq_ack(struct irq_data *d) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_status_reg); if (g->intr_ack_high) @@ -629,7 +629,7 @@ static void msm_gpio_irq_ack(struct irq_data *d) if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) @@ -642,7 +642,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); /* * For hw without possibility of detecting both edges @@ -716,7 +716,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) irq_set_handler_locked(d, handle_level_irq); @@ -732,11 +732,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) struct msm_pinctrl *pctrl = gpiochip_get_data(gc); unsigned long flags; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); irq_set_irq_wake(pctrl->irq, on); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -882,7 +882,7 @@ int msm_pinctrl_probe(struct platform_device *pdev, pctrl->soc = soc_data; pctrl->chip = msm_gpio_template; - spin_lock_init(&pctrl->lock); + raw_spin_lock_init(&pctrl->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pctrl->regs = devm_ioremap_resource(&pdev->dev, res); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -289,19 +289,11 @@ unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, unsigned long *indices, unsigned long first_index, unsigned int max_items); -#ifdef CONFIG_PREEMPT_RT_FULL -static inline int radix_tree_preload(gfp_t gm) { return 0; } -static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } -static inline int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) -{ - return 0; -}; - -#else int radix_tree_preload(gfp_t 
gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); -#endif +void radix_tree_preload_end(void); + void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); @@ -324,11 +316,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); -static inline void radix_tree_preload_end(void) -{ - preempt_enable_nort(); -} - /** * struct radix_tree_iter - radix tree iterator state * diff --git a/kernel/cpuset.c b/kernel/cpuset.c --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -284,7 +284,7 @@ static struct cpuset top_cpuset = { */ static DEFINE_MUTEX(cpuset_mutex); -static DEFINE_SPINLOCK(callback_lock); +static DEFINE_RAW_SPINLOCK(callback_lock); static struct workqueue_struct *cpuset_migrate_mm_wq; @@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) continue; rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cp->effective_cpus, new_cpus); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); @@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) return retval; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* use trialcs->cpus_allowed as a temp variable */ update_cpumasks_hier(cs, trialcs->cpus_allowed); @@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) continue; rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cp->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && !nodes_equal(cp->mems_allowed, cp->effective_mems)); @@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) goto done; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->mems_allowed = trialcs->mems_allowed; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* use trialcs->mems_allowed as a temp variable */ update_nodemasks_hier(cs, &trialcs->mems_allowed); @@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) || (is_spread_page(cs) != is_spread_page(trialcs))); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->flags = trialcs->flags; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) rebuild_sched_domains_locked(); @@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) cpuset_filetype_t type = seq_cft(sf)->private; int ret = 0; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); switch (type) { case FILE_CPULIST: @@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) ret = -EINVAL; } - spin_unlock_irq(&callback_lock); + 
raw_spin_unlock_irq(&callback_lock); return ret; } @@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cpuset_inc(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { cpumask_copy(cs->effective_cpus, parent->effective_cpus); cs->effective_mems = parent->effective_mems; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) goto out_unlock; @@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) } rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->mems_allowed = parent->mems_allowed; cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); return 0; @@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) static void cpuset_bind(struct cgroup_subsys_state *root_css) { mutex_lock(&cpuset_mutex); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); @@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) top_cpuset.mems_allowed = top_cpuset.effective_mems; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); mutex_unlock(&cpuset_mutex); } @@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, { bool is_empty; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->cpus_allowed, new_cpus); cpumask_copy(cs->effective_cpus, new_cpus); cs->mems_allowed = *new_mems; cs->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* * Don't call update_tasks_cpumask() if the cpuset becomes empty, @@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs, if (nodes_empty(*new_mems)) *new_mems = parent_cs(cs)->effective_mems; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->effective_cpus, new_cpus); cs->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (cpus_updated) update_tasks_cpumask(cs); @@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work) /* synchronize cpus_allowed to cpu_active_mask */ if (cpus_updated) { - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (!on_dfl) cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); cpumask_copy(top_cpuset.effective_cpus, &new_cpus); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* we don't mess with cpumasks of tasks in top_cpuset */ } /* synchronize mems_allowed to N_MEMORY */ if (mems_updated) { - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (!on_dfl) top_cpuset.mems_allowed = new_mems; top_cpuset.effective_mems = new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); update_tasks_nodemask(&top_cpuset); } @@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) { unsigned long flags; - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); guarantee_online_cpus(task_cs(tsk), pmask); 
rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); } void cpuset_cpus_allowed_fallback(struct task_struct *tsk) @@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) nodemask_t mask; unsigned long flags; - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); guarantee_online_mems(task_cs(tsk), &mask); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); return mask; } @@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) return true; /* Not hardwall and node outside mems_allowed: scan up cpusets */ - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); cs = nearest_hardwall_ancestor(task_cs(current)); allowed = node_isset(node, cs->mems_allowed); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); return allowed; } diff --git a/kernel/softirq.c b/kernel/softirq.c --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -206,6 +206,7 @@ static void handle_softirq(unsigned int vec_nr) } } +#ifndef CONFIG_PREEMPT_RT_FULL /* * If ksoftirqd is scheduled, we do not want to process pending softirqs * right now. Let ksoftirqd handle this at its own rate, to get fairness. @@ -217,7 +218,6 @@ static bool ksoftirqd_running(void) return tsk && (tsk->state == TASK_RUNNING); } -#ifndef CONFIG_PREEMPT_RT_FULL static inline int ksoftirqd_softirq_pending(void) { return local_softirq_pending(); @@ -794,13 +794,10 @@ void irq_enter(void) static inline void invoke_softirq(void) { -#ifdef CONFIG_PREEMPT_RT_FULL - unsigned long flags; -#endif - +#ifndef CONFIG_PREEMPT_RT_FULL if (ksoftirqd_running()) return; -#ifndef CONFIG_PREEMPT_RT_FULL + if (!force_irqthreads) { #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK /* @@ -821,6 +818,7 @@ static inline void invoke_softirq(void) wakeup_softirqd(); } #else /* PREEMPT_RT_FULL */ + unsigned long flags; local_irq_save(flags); if (__this_cpu_read(ksoftirqd) && diff --git a/lib/radix-tree.c b/lib/radix-tree.c --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -36,7 +36,7 @@ #include <linux/bitops.h> #include <linux/rcupdate.h> #include <linux/preempt.h> /* in_interrupt() */ - +#include <linux/locallock.h> /* Number of nodes in fully populated tree of given height */ static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; @@ -68,6 +68,7 @@ struct radix_tree_preload { struct radix_tree_node *nodes; }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); static inline void *node_to_entry(void *ptr) { @@ -290,14 +291,14 @@ radix_tree_node_alloc(struct radix_tree_root *root) * succeed in getting a node here (and never reach * kmem_cache_alloc) */ - rtp = &get_cpu_var(radix_tree_preloads); + rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes; rtp->nodes = ret->private_data; ret->private_data = NULL; rtp->nr--; } - put_cpu_var(radix_tree_preloads); + put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); /* * Update the allocation stack trace as this is more useful * for debugging. 
@@ -337,7 +338,6 @@ radix_tree_node_free(struct radix_tree_node *node) call_rcu(&node->rcu_head, radix_tree_node_rcu_free); } -#ifndef CONFIG_PREEMPT_RT_FULL /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On @@ -359,14 +359,14 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr) */ gfp_mask &= ~__GFP_ACCOUNT; - preempt_disable(); + local_lock(radix_tree_preloads_lock); rtp = this_cpu_ptr(&radix_tree_preloads); while (rtp->nr < nr) { - preempt_enable(); + local_unlock(radix_tree_preloads_lock); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; - preempt_disable(); + local_lock(radix_tree_preloads_lock); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { node->private_data = rtp->nodes; @@ -408,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) if (gfpflags_allow_blocking(gfp_mask)) return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); /* Preloading doesn't help anything with this gfp mask, skip it */ - preempt_disable(); + local_lock(radix_tree_preloads_lock); return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); @@ -424,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) /* Preloading doesn't help anything with this gfp mask, skip it */ if (!gfpflags_allow_blocking(gfp_mask)) { - preempt_disable(); + local_lock(radix_tree_preloads_lock); return 0; } @@ -457,7 +457,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) return __radix_tree_preload(gfp_mask, nr_nodes); } -#endif + +void radix_tree_preload_end(void) +{ + local_unlock(radix_tree_preloads_lock); +} +EXPORT_SYMBOL(radix_tree_preload_end); /* * The maximum index which can be stored in a radix tree diff --git a/localversion-rt b/localversion-rt --- a/localversion-rt +++ b/localversion-rt @@ -1 +1 @@ --rt3 +-rt4 Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
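The pinctrl-msm.c hunk above is an instance of a recurring -RT pattern: on PREEMPT_RT_FULL a spinlock_t is a sleeping lock, so any lock that is taken from a genuine hard-IRQ path (here the irqchip ->irq_mask()/->irq_ack()/->irq_set_type() callbacks) has to become a raw_spinlock_t. A minimal sketch of that conversion follows; the struct and function names are invented for illustration and are not part of the patch.

/*
 * Illustrative sketch only: the spinlock_t -> raw_spinlock_t conversion
 * as applied to an irqchip-style driver.
 */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_chip {
    raw_spinlock_t lock;        /* was: spinlock_t lock; */
    void __iomem *regs;
};

static void example_irq_mask(struct example_chip *c, unsigned int bit)
{
    unsigned long flags;
    u32 val;

    /* Runs in hard-IRQ context, so the lock must not sleep on -RT. */
    raw_spin_lock_irqsave(&c->lock, flags);
    val = readl(c->regs);
    val &= ~BIT(bit);
    writel(val, c->regs);
    raw_spin_unlock_irqrestore(&c->lock, flags);
}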
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Jan 20, 2017
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Dec 23, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.9-rt1 patch set. Please don't download and boot this before Christmas Eve. Changes since v4.8.15-rt10: - rebase to v4.9 Known issues - CPU hotplug got a little better but can deadlock. You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.9-rt1 The RT patch against v4.9 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patch-4.9-rt1.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9-rt1.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.8.15-rt10 patch set. You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8.15-rt10 The RT patch against v4.8.15 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patch-4.8.15-rt10.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patches-4.8.15-rt10.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Dec 12, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.8.14-rt9 patch set. Changes since v4.8.14-rt8: - If a network interface is removed we move all active skbs to a list and free them later. The hunk where the list was cleaned up was lost and is now back. - bnx2x and a few others could corrupt their ->poll_list. Patch by Steven Rostedt. - A missing RCU section in the workqueue code could lead to a "use after free" condition if the workqueue was removed. Reported by John Keeping. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against v4.8.14-rt8 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/incr/patch-4.8.14-rt8-rt9.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8.14-rt9 The RT patch against v4.8.14 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patch-4.8.14-rt9.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patches-4.8.14-rt9.tar.xz Sebastian diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -395,7 +395,19 @@ typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); + +/* + * When PREEMPT_RT_FULL is defined, all device interrupt handlers + * run as threads, and they can also be preempted (without PREEMPT_RT + * interrupt threads can not be preempted). Which means that calling + * __napi_schedule_irqoff() from an interrupt handler can be preempted + * and can corrupt the napi->poll_list. + */ +#ifdef CONFIG_PREEMPT_RT_FULL +#define __napi_schedule_irqoff(n) __napi_schedule(n) +#else void __napi_schedule_irqoff(struct napi_struct *n); +#endif static inline bool napi_disable_pending(struct napi_struct *n) { diff --git a/kernel/workqueue.c b/kernel/workqueue.c --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1127,9 +1127,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) * As both pwqs and pools are RCU protected, the * following lock operations are safe.
*/ + rcu_read_lock(); local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); put_pwq(pwq); local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); + rcu_read_unlock(); } } diff --git a/localversion-rt b/localversion-rt --- a/localversion-rt +++ b/localversion-rt @@ -1 +1 @@ --rt8 +-rt9 diff --git a/net/core/dev.c b/net/core/dev.c --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4912,6 +4912,7 @@ void __napi_schedule(struct napi_struct *n) } EXPORT_SYMBOL(__napi_schedule); +#ifndef CONFIG_PREEMPT_RT_FULL /** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule @@ -4923,6 +4924,7 @@ void __napi_schedule_irqoff(struct napi_struct *n) ____napi_schedule(this_cpu_ptr(&softnet_data), n); } EXPORT_SYMBOL(__napi_schedule_irqoff); +#endif void __napi_complete(struct napi_struct *n) { @@ -5212,13 +5214,21 @@ static void net_rx_action(struct softirq_action *h) struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; + struct sk_buff_head tofree_q; + struct sk_buff *skb; LIST_HEAD(list); LIST_HEAD(repoll); + __skb_queue_head_init(&tofree_q); + local_irq_disable(); + skb_queue_splice_init(&sd->tofree_queue, &tofree_q); list_splice_init(&sd->poll_list, &list); local_irq_enable(); + while ((skb = __skb_dequeue(&tofree_q))) + kfree_skb(skb); + for (;;) { struct napi_struct *n; Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
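To illustrate why the netdevice.h hunk above maps __napi_schedule_irqoff() to __napi_schedule() on PREEMPT_RT_FULL: with forced-threaded, preemptible interrupt handlers the "irqoff" assumption no longer holds. The sketch below shows a hypothetical driver interrupt handler; the device and handler names are invented, only the two NAPI calls are real kernel API.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical network driver interrupt handler. */
static irqreturn_t example_net_isr(int irq, void *dev_id)
{
    struct napi_struct *napi = dev_id;

    /*
     * On mainline this runs with interrupts disabled, so the irqoff
     * variant is safe.  On PREEMPT_RT_FULL the handler is a preemptible
     * thread; the #define from the hunk above turns this into a plain
     * __napi_schedule(), which protects napi->poll_list itself.
     */
    __napi_schedule_irqoff(napi);
    return IRQ_HANDLED;
}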
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Dec 01, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.8.11-rt7 patch set. Changes since v4.8.11-rt6: - A fix for a race in the futex/rtmutex code which was there since the very beginning. Reported by David Daney, fixed by Thomas Gleixner. - A fix for the kprobe code on ARM by Yang Shi. - It is no longer possible to force an expedited RCU grace period on -RT. We had one spot in the network code where it was disabled on RT due to the high latencies it caused. Suggested by Luiz Capitulino and patched by Julia Cartwright. - Expedited RCU grace periods are now forced during boot which should speed up the boot process (even on -RT). Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.8.11-rt6 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/incr/patch-4.8.11-rt6-rt7.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8.11-rt7 The RT patch against 4.8.11 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patch-4.8.11-rt7.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patches-4.8.11-rt7.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
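For readers who have not met the two RCU grace-period flavours mentioned above: an expedited grace period pokes every CPU (with IPIs) so it finishes quickly, which is exactly the latency hit -RT wants to avoid, while a normal grace period simply waits. The fragment below is an illustrative sketch with made-up names, not code from the patch.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
    int data;
};

static void example_retire(struct example_obj *obj)
{
    /*
     * Latency friendly: wait for a normal grace period.  The expedited
     * variant, synchronize_rcu_expedited(), completes faster but
     * disturbs all CPUs, so -RT no longer allows forcing it globally.
     */
    synchronize_rcu();
    kfree(obj);
}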
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Oct 31, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.8.6-rt5 patch set. Changes since v4.8.6-rt4: - Added `-no-PIE' to the Makefile for AFLAGS as well. This breaks gcc 3.2. Is someone here still using it? - The NFS fixup was slightly adjusted to avoid disabling preemption on !RT. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.8.6-rt4 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/incr/patch-4.8.6-rt4-rt5.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8.6-rt5 The RT patch against 4.8.6 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patch-4.8.6-rt5.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patches-4.8.6-rt5.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Oct 24, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.8.2-rt3 patch set. Changes since v4.8.2-rt2: - The connector subsystem could sleep in invalid context. Found and fixed by Mike Galbraith - zram / zcomp has shown new warnings. The warnings have been addressed and an old error fixed (Mike Galbraith) - The ftrace header was off slightly and the ascii arrows were pointing in the wrong direction (Mike Galbraith) - On CPU-down (CPU hotplug) we could attempt to sleep in the wrong context. (Mike Galbraith) - Removed an unused static variable in RXRPC (noticed by kbuild test robot) - ifdefed a variable in APIC so we don't get this "unused variable" warning on certain configurations (noticed by kbuild test robot) - Added `-no-PIE' to the Makefile. This breaks gcc 3.2. Is someone here still using it? - Fixed docbook in two places (noticed by kbuild test robot) - Fixed compile on sparc which was broken after I moved RCU headers (noticed by kbuild test robot) - The kbuild test robot sent me a warning about sleeping in invalid context in the NFS4 code. I didn't manage to reproduce this myself but the warning is valid. I attempted to fix this and will wait for the robot's feedback :) - Lazy preempt was broken on x86-32. Fixed by Paul Gortmaker. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.8.2-rt2 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/incr/patch-4.8.2-rt2-rt3.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8.2-rt3 The RT patch against 4.8.2 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patch-4.8.2-rt3.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/patches-4.8.2-rt3.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Oct 17, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Oct 06, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.8-rt1 patch set. Changes since v4.6.7-rt14: - rebased to v4.8 Known issues - CPU hotplug got a little better but can deadlock. You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.8-rt1 The RT patch against 4.8 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patch-4.8-rt1.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.8/older/patches-4.8-rt1.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Sep 30, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.7-rt14 patch set. Changes since v4.6.7-rt13: - It is possible that a task which got priority boosted de-boosted itself too early in the unlock path. This leads to a priority inversion because the higher-priority waiter waits while a process with lower priority runs. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.7-rt13 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.7-rt13-rt14.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.7-rt14 The RT patch against 4.6.7 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.7-rt14.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.7-rt14.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
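The boosting and de-boosting in question comes from priority inheritance: a PI rt_mutex (or PI futex) raises the lock owner to the priority of its highest-priority waiter until the owner unlocks. Purely as an illustration of where such boosted owners come from in userspace, a priority-inheritance mutex is created like this (standard POSIX API, unrelated to the kernel-side fix itself):

#include <pthread.h>

/* Initialize a mutex that uses priority inheritance. */
static int init_pi_mutex(pthread_mutex_t *m)
{
    pthread_mutexattr_t attr;
    int ret;

    ret = pthread_mutexattr_init(&attr);
    if (ret)
        return ret;
    ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
    if (!ret)
        ret = pthread_mutex_init(m, &attr);
    pthread_mutexattr_destroy(&attr);
    return ret;
}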
-
- Sep 15, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.7-rt13 patch set. Changes since v4.6.7-rt12: - The dcache regression fix up introduced another problem. As pointed out by Thomas Gleixner we can't avoid cpu_chill() for !RT tasks because the owner might be preempted and we would spin until our time slice is used up. Therefore the sched class is ignored and we "chill" if the lock is taken and cond_resched() did not work. - Newer gcc makes some noise if __builtin_return_address(x) with x > 1 is used. The warning can be ignored via a config option (Steven Rostedt) - might_resched() on x86 with lazy preempt could ignore the preemption counter. Not any more. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.7-rt12 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.7-rt12-rt13.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.7-rt13 The RT patch against 4.6.7 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.7-rt13.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.7-rt13.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Sep 08, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.7-rt12 patch set. Changes since v4.6.7-rt11: - The update to v4.6.7-rt11 introduced a performance regression especially visible when compiling a kernel on /dev/shm. It is fixed by invoking the "chill" function less often. Reported by Joakim Hernberg. - We had a fix in v3.12.8-rt11 for ip_send_unicast_reply() which I dropped in the v3.18.8-based -RT due to code changes, assuming the extra serialization was no longer required. As it turns out, it is still required :) - While looking around it appeared that a similar serialisation might be required in icmp_sk(). No crash has been observed; this is just a precaution. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.7-rt11 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.7-rt11-rt12.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.7-rt12 The RT patch against 4.6.7 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.7-rt12.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.7-rt12.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
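The extra serialization for ip_send_unicast_reply()/icmp_sk() follows the -RT local-lock pattern: instead of get_cpu_var()/put_cpu_var() (which disables preemption), the per-CPU data is guarded by a local lock so the section stays preemptible but is still serialized per CPU. The sketch below uses hypothetical names; the locallock API itself (DEFINE_LOCAL_IRQ_LOCK, get_locked_var, put_locked_var) is the one visible in the radix-tree hunk of the v4.9.6-rt4 release above and exists only with the RT patch applied.

#include <linux/locallock.h>
#include <linux/percpu.h>

struct example_reply_state {
    int in_use;
};

static DEFINE_LOCAL_IRQ_LOCK(example_reply_lock);
static DEFINE_PER_CPU(struct example_reply_state, example_reply_state);

static void example_send_reply(void)
{
    struct example_reply_state *st;

    /* Serialized per CPU, yet preemptible on -RT. */
    st = &get_locked_var(example_reply_lock, example_reply_state);
    st->in_use = 1;
    /* ... build and transmit the reply using *st ... */
    st->in_use = 0;
    put_locked_var(example_reply_lock, example_reply_state);
}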
-
- Aug 22, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Aug 05, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.5-rt10 patch set. Changes since v4.6.5-rt9: - Added a missing cpu_light_get() in the scsi fcoe driver. Patch by Mike Galbraith. - Under a special condition (a preempted TLB flush in do_exit()) it was possible on x86-UP to enter endless pagefaults. The pagefaults stopped after a context switch. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.5-rt9 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.5-rt9-rt10.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.5-rt10 The RT patch against 4.6.5 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.5-rt10.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.5-rt10.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Jul 29, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Jul 15, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.4-rt8 patch set. Changes since v4.6.4-rt7: - Import Thomas' timer rework known as the "timer: Refactor the timer wheel" patch set, which made its way into the -TIP tree. With these changes we get NOHZ_FULL working. Finally. - Avoid a warning about an unused symbol in the !RT case (preemptible_lazy()) - Replace the "trace event preempt count" fixup with Steven's version. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.4-rt7 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.4-rt7-rt8.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.4-rt8 The RT patch against 4.6.4 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.4-rt8.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.4-rt8.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Jul 14, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.4-rt7 patch set. Changes since v4.6.4-rt6: - Wake up all waiters of del_timer_sync(). Usually there should not be more than just one waiter (per timer base) but it is possible to have more. - Wake up the waiters of del_timer_sync() after dropping the base lock. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.4-rt6 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.4-rt6-rt7.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.4-rt7 The RT patch against 4.6.4 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.4-rt7.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.4-rt7.tar.xz Sebastian
-
- Jul 12, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Jun 10, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.2-rt5 patch set. Changes since v4.6.2-rt4: - "Schedule while atomic" fixup in cgroup / memcontrol. Patch by Mike Galbraith. - Rename of "work-simple" to "swork" to align with "swait". Patch by Mike Galbraith. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.2-rt4 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.2-rt4-rt5.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.2-rt5 The RT patch against 4.6.2 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.2-rt5.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.2-rt5.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Jun 03, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6.1-rt3 patch set. Changes since v4.6.1-rt2: - On return from interrupt on ARM we could schedule with lazy preempt count > 0 under some circumstances. It isn't toxic but it shouldn't happen. Noticed by Thomas Gleixner. - The way the preempt counter is accessed on non-x86 architectures allowed the compiler to reorder the code slightly. This led to decrementing the preempt counter, checking for the need-resched bit, followed by writing the counter back. An interrupt between the last two steps will lead to a missing preemption point and thus high latencies. Patch by Peter Zijlstra. - The recorded preemption counter in event trace points (such as raw_syscall_entry) is off by one because each trace point increments the counter. This has been corrected. - It is now ensured that there are no attempts to print from IRQ or NMI context. On certain events such as the hard-lockup detector we would attempt to grab sleeping locks. - Allow lru_add_drain_all() to perform its work remotely. Patch by Luiz Capitulino and Rik van Riel. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.1-rt2 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.1-rt2-rt3.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.1-rt3 The RT patch against 4.6.1 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.1-rt3.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.1-rt3.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
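To make the reordering issue concrete, here is a deliberately simplified sketch (not the actual kernel code, and the counter is a plain variable rather than the real per-CPU preempt count): if the compiler is allowed to test the need-resched bit before the decremented counter reaches memory, an interrupt arriving in between still sees an elevated count and skips the preemption point.

#include <linux/compiler.h>    /* barrier() */
#include <linux/sched.h>       /* need_resched(), schedule() */

static int example_preempt_count;    /* stand-in for the real counter */

static void example_preempt_enable(void)
{
    int val = example_preempt_count - 1;

    example_preempt_count = val;    /* write the counter back first ... */
    barrier();                      /* ... and keep the compiler from
                                     * moving that store past the test */
    if (!val && need_resched())
        schedule();                 /* preemption point */
}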
-
- Jun 02, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- May 13, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.6-rc7-rt1 patch set. I tested it on my AMD A10, 64bit. Had a few runs on ARM, nothing exploded so far. Changes since v4.4.9-rt17: - rebase to v4.6-rc7 - RWLOCKS and SPINLOCKS used to be cacheline aligned on RT. Now they no longer are, which is the same behaviour as upstream. Known issues (inherited from v4.4-RT): - CPU hotplug got a little better but can deadlock. You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6-rc7-rt1 The RT patch against 4.6-rc7 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6-rc7-rt1.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6-rc7-rt1.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- May 06, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Apr 15, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.4.7-rt16 patch set. Changes since v4.4.7-rt15: - picked up a few fixes from upstream for panic() re-entrance from NMI. On -RT we have the same problem without NMI, but with the soft/hard watchdog triggering panic(). - Don't take the port->lock on oops_in_progress. We had a trylock but that trylock does not work if invoked with IRQs off (like from the panic() caller). I am not very happy about this but if we keep it that way it would make sense to make a similar change for the other UART drivers… - Rik van Riel and Clark Williams pointed out that a change made by Frederic Weisbecker in v4.5 could be backported and then we could remove some locking around vtime handling. Known issues: - CPU hotplug got a little better but can deadlock. The delta patch against 4.4.7-rt15 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.7-rt15-rt16.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.7-rt16 The RT patch against 4.4.7 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.7-rt16.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.7-rt16.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
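For context on the port->lock item: the usual upstream idiom in a console write path is the trylock dance sketched below (generic names, not taken from a particular driver). The -rt16 change goes further and does not take the lock at all while oops_in_progress is set, because even the trylock misbehaves when called with interrupts already disabled from panic().

#include <linux/serial_core.h>

static void example_console_write(struct uart_port *port, const char *s,
                                  unsigned int count)
{
    unsigned long flags;
    int locked = 1;

    if (oops_in_progress)
        locked = spin_trylock_irqsave(&port->lock, flags);
    else
        spin_lock_irqsave(&port->lock, flags);

    /* ... push 'count' characters from 's' into the UART FIFO ... */

    if (locked)
        spin_unlock_irqrestore(&port->lock, flags);
}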
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Apr 08, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.4.6-rt14 patch set. Changes since v4.4.6-rt13: - Dan Murphy reported that zram does not compile on !RT. Now it does. - The TWD timer is now enabled on IMX6Q configs even with !SMP. People with i.MX6SOLO should now see better cyclictest results with TWD than with the mxc-timer. Known issues: - CPU hotplug got a little better but can deadlock. The delta patch against 4.4.6-rt13 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.6-rt13-rt14.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.6-rt14 The RT patch against 4.4.6 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.6-rt14.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.6-rt14.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Apr 01, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.4.6-rt13 patch set. Changes since v4.4.6-rt12: - Alexandre Belloni sent a patch for the AT91 to get rid of the free_irq() warning. - Yang Shi sent a patch to address a "sleeping while atomic" warning in a writeback tracepoint. Until now it was disabled to avoid it, now it can be used again. - Rik van Riel sent a patch to make the kvm async pagefault code use a simple wait queue. - Mike Galbraith sent a patch to address a "sleeping while atomic" warning in zsmalloc. - Network packets sent by an RT task could be delayed (but won't block the RT task) if a task with lower priority was interrupted while sending a packet. This is addressed by taking the qdisc lock so the high-prio task can boost the task with lower priority. - Clark Williams reported a swait-related complete_all() warning while coming out of suspend. Suspend to RAM (and hibernate) are now filtered out from the warning. - Mike Galbraith sent a patch to address a "sleeping while atomic" warning in the zram driver. - Josh Cartwright sent a patch to fix a lockdep splat in list_bl which was reported by Luis Claudio R. Goncalves. Known issues: - CPU hotplug got a little better but can deadlock. The delta patch against 4.4.6-rt12 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.6-rt12-rt13.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.6-rt13 The RT patch against 4.4.6 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.6-rt13.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.6-rt13.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Mar 29, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Mar 09, 2016
-
-
Sebastian Andrzej Siewior authored
Dear RT folks! I'm pleased to announce the v4.4.4-rt11 patch set. Changes since v4.4.4-rt10: - A compile error has been fixed in conjunction with the latency tracer. - AT91 got a little better. A larger patch series by Boris Brezillon has been merged. That means SAMA5 boots now. There is one warning left about invoking free_irq() twice. I would be glad about some feedback from pre-SAMA5 SoCs, those which share the UART and timer interrupt. It seems the one I have here does not do this anymore. - A patch on top of the AT91 series to avoid two warnings while switching from periodic to oneshot mode. - Daniel Wagner refurbished the swait patches. This release now contains what is queued for v4.6 in the TIP tree. Only RT uses `completion' based on swait. Based on the current implementation you will see a warning if complete_all() is invoked on more than two waiters. Please report it if you see this. Known issues: - CPU hotplug got a little better but can deadlock. The delta patch against 4.4.4-rt10 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.4-rt10-rt11.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.4-rt11 The RT patch against 4.4.4 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.4-rt11.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.4-rt11.tar.xz Sebastian Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
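Since swait-based completions keep coming up in these notes, here is a minimal illustration of the simple wait-queue API of this kernel generation (the variable and function names are made up; only DECLARE_SWAIT_QUEUE_HEAD, swait_event and swake_up/swake_up_all are real):

#include <linux/swait.h>
#include <linux/types.h>

static DECLARE_SWAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static void example_waiter(void)
{
    /* Sleep until example_done becomes true. */
    swait_event(example_wq, example_done);
}

static void example_waker(void)
{
    example_done = true;
    swake_up(&example_wq);        /* wake a single waiter */
    /* swake_up_all(&example_wq) would wake all of them instead */
}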
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- Feb 29, 2016
-
-
Thomas Gleixner authored
Dear RT folks! I'm pleased to announce the v4.4.3-rt9 patch set. v4.4.2-rt7 and v4.4.3-rt8 are non-announced updates to incorporate the linux-4.4.y stable tree. There is one change caused by the 4.4.3 update: The relaxed handling of dump_stack() on RT has been dropped as there is actually a potential deadlock lurking around the corner. See: commit d7ce3692 upstream. This does not affect the other facilities which gather stack traces. RT changes since v4.4.3-rt8: Clark Williams (1): rcu/torture: Comment out rcu_bh ops on PREEMPT_RT_FULL Josh Cartwright (1): sc16is7xx: Drop bogus use of IRQF_ONESHOT Mike Galbraith (4): sched,rt: __always_inline preemptible_lazy() locking/lglocks: Use preempt_enable/disable_nort() in lg_double_[un]lock drm,radeon,i915: Use preempt_disable/enable_rt() where recommended drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end() Sebastian Andrzej Siewior (1): kernel: sched: Fix preempt_disable_ip recording for preempt_disable() Thomas Gleixner (4): iommu/amd: Use WARN_ON_NORT in __attach_device() tick/broadcast: Make broadcast hrtimer irqsafe trace/writeback: Block cgroup path tracing on RT v4.4.3-rt9 Yang Shi (2): trace: Use rcuidle version for preemptoff_hist trace point f2fs: Mutex can't be used by down_write_nest_lock() Known issues: - bcache stays disabled - CPU hotplug is not better than before - The netlink_release() OOPS, reported by Clark, is still on the list, but unsolved due to lack of information The delta patch against 4.4.3-rt8 is appended below and can be found here: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.3-rt8-rt9.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.3-rt9 The RT patch against 4.4.3 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.3-rt9.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.3-rt9.tar.xz Enjoy! tglx Signed-off-by:
Thomas Gleixner <tglx@linutronix.de>
-
- Feb 28, 2016
-
-
Thomas Gleixner authored
Signed-off-by:
Thomas Gleixner <tglx@linutronix.de>
-
- Feb 25, 2016
-
-
Sebastian Andrzej Siewior authored
Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-