// Handling of stepper drivers.
//
// Copyright (C) 2016-2019  Kevin O'Connor <kevin@koconnor.net>
//
// This file may be distributed under the terms of the GNU GPLv3 license.
#include "autoconf.h" // CONFIG_*
|
2017-03-09 21:49:03 +03:00
|
|
|
#include "basecmd.h" // oid_alloc
|
2016-05-25 18:37:40 +03:00
|
|
|
#include "board/gpio.h" // gpio_out_write
|
2016-06-09 03:51:00 +03:00
|
|
|
#include "board/irq.h" // irq_disable
|
2017-03-25 06:01:08 +03:00
|
|
|
#include "board/misc.h" // timer_is_before
|
2016-05-25 18:37:40 +03:00
|
|
|
#include "command.h" // DECL_COMMAND
|
|
|
|
#include "sched.h" // struct timer
|
|
|
|
#include "stepper.h" // command_config_stepper
|
|
|
|
|
// Export the configured step delay so the host can account for it
// (NOTE(review): exact host-side use is defined by the protocol layer)
DECL_CONSTANT("STEP_DELAY", CONFIG_STEP_DELAY);
|
|
|
/****************************************************************
 * Steppers
 ****************************************************************/
// One queued movement segment, created by a "queue_step" command
struct stepper_move {
    struct move_node node;  // Linkage in the owning stepper's move queue
    uint32_t interval;      // Clock ticks before the first step of this move
    int16_t add;            // Amount added to interval after each step
    uint16_t count;         // Number of steps in this move
    uint8_t flags;          // MF_* flags (see below)
};

// MF_DIR: toggle the direction pin before starting this move
enum { MF_DIR=1<<0 };
|
|
|
|
|
// Runtime state for one stepper motor driver
struct stepper {
    struct timer time;          // Scheduler timer (fires stepper_event)
    uint32_t interval;          // Ticks between steps of the current move
    int16_t add;                // Added to interval after each step
#if CONFIG_STEP_DELAY <= 0
    uint_fast16_t count;        // Steps remaining in current move
    // Without scheduled unstep events, the next step time is simply the
    // timer wakeup time
#define next_step_time time.waketime
#else
    uint32_t count;             // Remaining step+unstep events (2 per step)
    uint32_t next_step_time;    // Absolute clock of next step (unstep events
                                // may wake the timer earlier)
#endif
    struct gpio_out step_pin, dir_pin;
    uint32_t position;          // Step position; top bit doubles as a
                                // reverse-direction flag (see
                                // stepper_get_position)
    struct move_queue_head mq;  // Queue of pending 'struct stepper_move's
    // gcc (pre v6) does better optimization when uint8_t are bitfields
    uint8_t flags : 8;
};

// Bias added to the internal position; subtracted again when reporting
// (see command_stepper_get_position)
enum { POSITION_BIAS=0x40000000 };

// Flags for stepper.flags
enum {
    SF_LAST_DIR=1<<0, SF_NEXT_DIR=1<<1, SF_INVERT_STEP=1<<2, SF_HAVE_ADD=1<<3,
    SF_LAST_RESET=1<<4, SF_NEED_RESET=1<<5
};
|
2016-05-25 18:37:40 +03:00
|
|
|
|
|
|
|
// Setup a stepper for the next move in its queue.  min_next_time is the
// earliest permitted wakeup time (only used when unstep events are
// scheduled, ie CONFIG_STEP_DELAY > 0).
static uint_fast8_t
stepper_load_next(struct stepper *s, uint32_t min_next_time)
{
    if (move_queue_empty(&s->mq)) {
        // There is no next move - the queue is empty
        s->count = 0;
        return SF_DONE;
    }

    // Load next 'struct stepper_move' into 'struct stepper'
    struct move_node *mn = move_queue_pop(&s->mq);
    struct stepper_move *m = container_of(mn, struct stepper_move, node);
    s->next_step_time += m->interval;
    s->add = m->add;
    // First add is applied here so the event handlers can add after use
    s->interval = m->interval + m->add;
    if (CONFIG_STEP_DELAY <= 0) {
        if (CONFIG_MACH_AVR)
            // On AVR see if the add can be optimized away
            s->flags = m->add ? s->flags|SF_HAVE_ADD : s->flags & ~SF_HAVE_ADD;
        s->count = m->count;
    } else {
        // On faster mcus, it is necessary to schedule unstep events
        // and so there are twice as many events.  Also check that the
        // next step event isn't too close to the last unstep.
        if (unlikely(timer_is_before(s->next_step_time, min_next_time))) {
            // Tolerate being slightly behind; shutdown if over ~1ms late
            if ((int32_t)(s->next_step_time - min_next_time)
                < (int32_t)(-timer_from_us(1000)))
                shutdown("Stepper too far in past");
            s->time.waketime = min_next_time;
        } else {
            s->time.waketime = s->next_step_time;
        }
        s->count = (uint32_t)m->count * 2;
    }
    // Add all steps to s->position (stepper_get_position() can calc mid-move)
    if (m->flags & MF_DIR) {
        // Direction change: negate the sign-bit encoding, then add
        s->position = -s->position + m->count;
        gpio_out_toggle_noirq(s->dir_pin);
    } else {
        s->position += m->count;
    }

    move_free(m);
    return SF_RESCHEDULE;
}
|
|
|
|
|
// AVR optimized step function
static uint_fast8_t
stepper_event_avr(struct stepper *s)
{
    // Begin the step pulse
    gpio_out_toggle_noirq(s->step_pin);
    uint_fast16_t count = s->count - 1;
    if (likely(count)) {
        // More steps remain in the current move
        s->count = count;
        s->time.waketime += s->interval;
        // End the step pulse
        gpio_out_toggle_noirq(s->step_pin);
        if (s->flags & SF_HAVE_ADD)
            // Only touch interval when stepper_load_next saw a non-zero add
            s->interval += s->add;
        return SF_RESCHEDULE;
    }
    // Move complete - load the next queued move (if any)
    uint_fast8_t ret = stepper_load_next(s, 0);
    // End the step pulse
    gpio_out_toggle_noirq(s->step_pin);
    return ret;
}
|
|
|
|
|
|
|
|
// Optimized step function for stepping and unstepping in same function
static uint_fast8_t
stepper_event_nodelay(struct stepper *s)
{
    // Begin the step pulse
    gpio_out_toggle_noirq(s->step_pin);
    uint_fast16_t count = s->count - 1;
    if (likely(count)) {
        // More steps remain in the current move
        s->count = count;
        s->time.waketime += s->interval;
        s->interval += s->add;
        // End the step pulse
        gpio_out_toggle_noirq(s->step_pin);
        return SF_RESCHEDULE;
    }
    // Move complete - load the next queued move (if any)
    uint_fast8_t ret = stepper_load_next(s, 0);
    // End the step pulse
    gpio_out_toggle_noirq(s->step_pin);
    return ret;
}
|
|
|
|
|
// Timer callback - step the given stepper.
uint_fast8_t
stepper_event(struct timer *t)
{
    struct stepper *s = container_of(t, struct stepper, time);
    // Dispatch to the optimized handlers when unstep scheduling is disabled
    if (CONFIG_STEP_DELAY <= 0 && CONFIG_MACH_AVR)
        return stepper_event_avr(s);
    if (CONFIG_STEP_DELAY <= 0)
        return stepper_event_nodelay(s);

    // Normal step code - schedule the unstep event
    if (!CONFIG_HAVE_STRICT_TIMING)
        gpio_out_toggle_noirq(s->step_pin);
    uint32_t step_delay = timer_from_us(CONFIG_STEP_DELAY);
    uint32_t min_next_time = timer_read_time() + step_delay;
    if (CONFIG_HAVE_STRICT_TIMING)
        // Toggling gpio after reading the time is a micro-optimization
        gpio_out_toggle_noirq(s->step_pin);
    s->count--;
    // count alternates odd (unstep pending) / even (step pending)
    if (likely(s->count & 1))
        // Schedule unstep event
        goto reschedule_min;
    if (likely(s->count)) {
        s->next_step_time += s->interval;
        s->interval += s->add;
        if (unlikely(timer_is_before(s->next_step_time, min_next_time)))
            // The next step event is too close - push it back
            goto reschedule_min;
        s->time.waketime = s->next_step_time;
        return SF_RESCHEDULE;
    }
    // Move complete - load the next queued move (if any)
    return stepper_load_next(s, min_next_time);
reschedule_min:
    s->time.waketime = min_next_time;
    return SF_RESCHEDULE;
}
|
|
|
|
|
|
|
|
// Allocate and initialize a stepper from a "config_stepper" host command.
// args: [0]=oid, [1]=step_pin, [2]=dir_pin, [3]=invert_step
void
command_config_stepper(uint32_t *args)
{
    struct stepper *s = oid_alloc(args[0], command_config_stepper, sizeof(*s));
    // NOTE(review): with CONFIG_INLINE_STEPPER_HACK the timer func is
    // presumably set elsewhere - confirm against the scheduler code
    if (!CONFIG_INLINE_STEPPER_HACK)
        s->time.func = stepper_event;
    s->flags = args[3] ? SF_INVERT_STEP : 0;
    // Step pin idles at its inverted level so each toggle pair is a pulse
    s->step_pin = gpio_out_setup(args[1], s->flags & SF_INVERT_STEP);
    s->dir_pin = gpio_out_setup(args[2], 0);
    s->position = -POSITION_BIAS;
    move_queue_setup(&s->mq, sizeof(struct stepper_move));
}
DECL_COMMAND(command_config_stepper,
             "config_stepper oid=%c step_pin=%c dir_pin=%c invert_step=%c");
|
2016-05-25 18:37:40 +03:00
|
|
|
|
2017-03-05 23:00:15 +03:00
|
|
|
// Return the 'struct stepper' for a given stepper oid
|
|
|
|
struct stepper *
|
|
|
|
stepper_oid_lookup(uint8_t oid)
|
|
|
|
{
|
|
|
|
return oid_lookup(oid, command_config_stepper);
|
|
|
|
}
|
|
|
|
|
2016-05-25 18:37:40 +03:00
|
|
|
// Schedule a set of steps with a given timing.
// args: [0]=oid, [1]=interval, [2]=count, [3]=add
void
command_queue_step(uint32_t *args)
{
    struct stepper *s = stepper_oid_lookup(args[0]);
    struct stepper_move *m = move_alloc();
    m->interval = args[1];
    m->count = args[2];
    if (!m->count)
        shutdown("Invalid count parameter");
    m->add = args[3];
    m->flags = 0;

    irq_disable();
    uint8_t flags = s->flags;
    // A direction pin toggle is needed if the requested next direction
    // differs from the direction of the last queued move
    if (!!(flags & SF_LAST_DIR) != !!(flags & SF_NEXT_DIR)) {
        flags ^= SF_LAST_DIR;
        m->flags |= MF_DIR;
    }
    flags &= ~SF_LAST_RESET;
    if (s->count) {
        // Stepper already active - just enqueue the move
        s->flags = flags;
        move_queue_push(&m->node, &s->mq);
    } else if (flags & SF_NEED_RESET) {
        // Stepper was stopped and no reset_step_clock seen - drop the move
        move_free(m);
    } else {
        // Stepper idle - enqueue, load the move, and start the timer
        s->flags = flags;
        move_queue_push(&m->node, &s->mq);
        stepper_load_next(s, s->next_step_time + m->interval);
        sched_add_timer(&s->time);
    }
    irq_enable();
}
DECL_COMMAND(command_queue_step,
             "queue_step oid=%c interval=%u count=%hu add=%hi");
|
|
|
|
|
|
|
|
// Set the direction of the next queued step
|
|
|
|
void
|
|
|
|
command_set_next_step_dir(uint32_t *args)
|
|
|
|
{
|
2017-03-05 23:00:15 +03:00
|
|
|
struct stepper *s = stepper_oid_lookup(args[0]);
|
2016-06-07 22:07:09 +03:00
|
|
|
uint8_t nextdir = args[1] ? SF_NEXT_DIR : 0;
|
|
|
|
irq_disable();
|
|
|
|
s->flags = (s->flags & ~SF_NEXT_DIR) | nextdir;
|
|
|
|
irq_enable();
|
2016-05-25 18:37:40 +03:00
|
|
|
}
|
|
|
|
DECL_COMMAND(command_set_next_step_dir, "set_next_step_dir oid=%c dir=%c");
|
|
|
|
|
|
|
|
// Set an absolute time that the next step will be relative to.
// args: [0]=oid, [1]=clock
void
command_reset_step_clock(uint32_t *args)
{
    struct stepper *s = stepper_oid_lookup(args[0]);
    uint32_t waketime = args[1];
    irq_disable();
    // Resetting mid-move would corrupt the step timing
    if (s->count)
        shutdown("Can't reset time when stepper active");
    s->next_step_time = waketime;
    // Clear the post-stop latch; record that a reset was the last action
    s->flags = (s->flags & ~SF_NEED_RESET) | SF_LAST_RESET;
    irq_enable();
}
DECL_COMMAND(command_reset_step_clock, "reset_step_clock oid=%c clock=%u");
|
|
|
|
|
|
|
|
// Return the current stepper position. Caller must disable irqs.
|
2017-03-05 23:00:15 +03:00
|
|
|
static uint32_t
|
2016-05-25 18:37:40 +03:00
|
|
|
stepper_get_position(struct stepper *s)
|
|
|
|
{
|
2016-06-14 02:42:34 +03:00
|
|
|
uint32_t position = s->position;
|
2020-03-04 21:30:06 +03:00
|
|
|
// If stepper is mid-move, subtract out steps not yet taken
|
2019-03-10 02:18:24 +03:00
|
|
|
if (CONFIG_STEP_DELAY <= 0)
|
2016-06-14 02:42:34 +03:00
|
|
|
position -= s->count;
|
|
|
|
else
|
|
|
|
position -= s->count / 2;
|
2020-03-04 21:30:06 +03:00
|
|
|
// The top bit of s->position is an optimized reverse direction flag
|
2016-05-25 18:37:40 +03:00
|
|
|
if (position & 0x80000000)
|
|
|
|
return -position;
|
|
|
|
return position;
|
|
|
|
}
|
|
|
|
|
2017-03-05 23:00:15 +03:00
|
|
|
// Report the current position of the stepper
void
command_stepper_get_position(uint32_t *args)
{
    uint8_t oid = args[0];
    struct stepper *s = stepper_oid_lookup(oid);
    // Sample position atomically with respect to the step irq
    irq_disable();
    uint32_t position = stepper_get_position(s);
    irq_enable();
    // Remove the internal bias before reporting to the host
    sendf("stepper_position oid=%c pos=%i", oid, position - POSITION_BIAS);
}
DECL_COMMAND(command_stepper_get_position, "stepper_get_position oid=%c");
|
|
|
|
|
2016-05-25 18:37:40 +03:00
|
|
|
// Stop all moves for a given stepper (used in end stop homing).  IRQs
// must be off.
void
stepper_stop(struct stepper *s)
{
    sched_del_timer(&s->time);
    s->next_step_time = 0;
    // Collapse position to its real value; sign-bit direction encoding
    // is reset by the negation (direction pin is forced low below)
    s->position = -stepper_get_position(s);
    s->count = 0;
    // Keep only the invert setting; require a reset_step_clock before
    // new moves are accepted (see command_queue_step)
    s->flags = (s->flags & SF_INVERT_STEP) | SF_NEED_RESET;
    gpio_out_write(s->dir_pin, 0);
    // Return step pin to its idle (inverted) level
    gpio_out_write(s->step_pin, s->flags & SF_INVERT_STEP);
    // Discard any queued moves
    while (!move_queue_empty(&s->mq)) {
        struct move_node *mn = move_queue_pop(&s->mq);
        struct stepper_move *m = container_of(mn, struct stepper_move, node);
        move_free(m);
    }
}
|
|
|
|
|
2017-05-26 16:14:26 +03:00
|
|
|
// Disable all steppers on an mcu shutdown event
void
stepper_shutdown(void)
{
    uint8_t i;
    struct stepper *s;
    foreach_oid(i, s, command_config_stepper) {
        // NOTE(review): queue is cleared before stepper_stop() so its
        // drain loop finds it empty - presumably move objects are
        // reclaimed wholesale during shutdown; confirm against basecmd
        move_queue_clear(&s->mq);
        stepper_stop(s);
    }
}
DECL_SHUTDOWN(stepper_shutdown);
|