33 #include <linux/slab.h>
34 #include <linux/module.h>
/* Embedded clockevent device; lives inside the driver's per-channel
 * private struct (presumably struct sh_cmt_priv — surrounding struct
 * definition is missing from this extract; TODO confirm). */
53 struct clock_event_device
ced;
/*
 * sh_cmt_read() - read a CMT channel register identified by @reg_nr.
 * NOTE(review): body lines between the signature and the CMSTR branch
 * are missing from this extract.
 */
66 static inline unsigned long sh_cmt_read(
struct sh_cmt_priv *
p,
int reg_nr)
/* CMSTR appears to be special-cased vs. the other channel registers
 * (likely a different base/width) — TODO confirm against full source. */
72 if (reg_nr ==
CMSTR) {
/*
 * sh_cmt_write() - write a CMT channel register identified by @reg_nr.
 * Mirror of sh_cmt_read(); the value parameter and body are missing
 * from this extract.
 */
89 static inline void sh_cmt_write(
struct sh_cmt_priv *
p,
int reg_nr,
/* Same CMSTR special-casing as in sh_cmt_read(). */
96 if (reg_nr ==
CMSTR) {
/*
 * sh_cmt_get_counter() - read the free-running counter (CMCNT) safely.
 * The counter is sampled three times in a row; the do/while condition
 * below retries until the three samples are monotonically consistent,
 * guarding against reading the register while it changes (and, via
 * o1/o2 — read between the visible lines — against an overflow-flag
 * change; those reads are missing from this extract).
 */
115 static unsigned long sh_cmt_get_counter(
struct sh_cmt_priv *p,
126 v1 = sh_cmt_read(p,
CMCNT);
127 v2 = sh_cmt_read(p,
CMCNT);
128 v3 = sh_cmt_read(p,
CMCNT);
/* Retry while the overflow flag changed (o1 != o2) or the three
 * samples are not in a consistent cyclic order. */
130 }
while (
unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
131 || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
/* sh_cmt_start_stop_ch(): read-modify-write of the shared CMSTR
 * start/stop register. The bit manipulation between the read and the
 * write-back is missing from this extract. */
145 value = sh_cmt_read(p,
CMSTR);
152 sh_cmt_write(p,
CMSTR, value);
/*
 * Channel enable path (function head missing from this extract —
 * presumably sh_cmt_enable()).
 */
/* Power the device up and mark it as needed during syscore
 * suspend/resume (it keeps time while the system sleeps). */
160 pm_runtime_get_sync(&p->
pdev->dev);
161 dev_pm_syscore_device(&p->
pdev->dev,
true);
/* Stop the channel before reconfiguring it. */
171 sh_cmt_start_stop_ch(p, 0);
/* Configure CMCSR; the magic values differ for 16-bit vs. (presumably)
 * 32-bit counters — clock-select/mode bits per the SH CMT datasheet,
 * TODO confirm exact meaning of 0x43 / 0x01a4. */
174 if (p->
width == 16) {
176 sh_cmt_write(p,
CMCSR, 0x43);
179 sh_cmt_write(p,
CMCSR, 0x01a4);
/* Match at maximum value; clear the counter. */
182 sh_cmt_write(p,
CMCOR, 0xffffffff);
183 sh_cmt_write(p,
CMCNT, 0);
/* Busy-wait (bounded to 100 iterations) for CMCNT to actually read
 * back as zero; hardware may delay the clear. */
196 for (k = 0; k < 100; k++) {
197 if (!sh_cmt_read(p,
CMCNT))
/* Still non-zero after the retries: treated as an error (error path
 * missing from this extract). */
202 if (sh_cmt_read(p,
CMCNT)) {
/* Finally start the channel. */
209 sh_cmt_start_stop_ch(p, 1);
/*
 * Channel disable path (function head missing — presumably
 * sh_cmt_disable()); undoes sh_cmt_enable() in reverse order.
 */
/* Stop the channel and put CMCSR back to its reset-ish state. */
222 sh_cmt_start_stop_ch(p, 0);
225 sh_cmt_write(p,
CMCSR, 0);
/* Drop the syscore marking and the runtime-PM reference taken in the
 * enable path. */
230 dev_pm_syscore_device(&p->
pdev->dev,
false);
231 pm_runtime_put(&p->
pdev->dev);
/* Bits for the per-channel flags word: which framework roles the
 * channel serves and transient state used by the event-programming
 * and interrupt paths. */
235 #define FLAG_CLOCKEVENT (1 << 0)
236 #define FLAG_CLOCKSOURCE (1 << 1)
237 #define FLAG_REPROGRAM (1 << 2)
238 #define FLAG_SKIPEVENT (1 << 3)
239 #define FLAG_IRQCONTEXT (1 << 4)
/*
 * sh_cmt_clock_event_program_verify() - program a match value into
 * CMCOR and verify the counter has not already passed it; retries with
 * a delay margin otherwise (retry loop largely missing from this
 * extract).
 */
241 static void sh_cmt_clock_event_program_verify(
struct sh_cmt_priv *p,
244 unsigned long new_match;
246 unsigned long delay = 0;
247 unsigned long now = 0;
/* Sample the counter (has_wrapped flags an overflow during the read). */
250 now = sh_cmt_get_counter(p, &has_wrapped);
/* Candidate match point: requested distance plus an escalating safety
 * delay for the retry case. */
269 new_match = now + value +
delay;
273 sh_cmt_write(p,
CMCOR, new_match);
/* Re-read the counter to check we programmed the match in time. */
275 now = sh_cmt_get_counter(p, &has_wrapped);
299 if (now < new_match) {
/* (tail of another function, visible here: re-verify with no extra
 * distance — caller context missing from this extract) */
332 sh_cmt_clock_event_program_verify(p, 0);
/*
 * sh_cmt_set_next() - set the next event @delta ticks ahead; thin
 * wrapper around __sh_cmt_set_next() (locking, if any, is in the
 * missing lines — TODO confirm).
 */
335 static void sh_cmt_set_next(
struct sh_cmt_priv *p,
unsigned long delta)
340 __sh_cmt_set_next(p, delta);
/*
 * Interrupt-handler fragment (function head missing — presumably
 * sh_cmt_interrupt()).
 */
/* In oneshot mode the event must not auto-repeat (handling in the
 * missing lines). */
365 if (p->
ced.mode == CLOCK_EVT_MODE_ONESHOT) {
/* Deliver the expired event to the clockevent framework. */
370 p->
ced.event_handler(&p->
ced);
/* Reprogram with a safety margin of 1 after handling. */
378 sh_cmt_clock_event_program_verify(p, 1);
/* Skip reprogramming when the device was shut down meanwhile. */
381 if ((p->
ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
/* Start-path fragment (presumably sh_cmt_start()): enable the channel
 * and capture its tick rate into p->rate. */
399 ret = sh_cmt_enable(p, &p->
rate);
/*
 * sh_cmt_stop() - drop a usage flag (clockevent/clocksource) and stop
 * the channel when no user remains (body missing from this extract).
 */
414 static void sh_cmt_stop(
struct sh_cmt_priv *p,
unsigned long flag)
/* (fragment of a neighbouring function: raw counter sample) */
448 raw = sh_cmt_get_counter(p, &has_wrapped);
/*
 * sh_cmt_clocksource_enable() - clocksource ->enable() hook: start the
 * channel, then push the now-known rate to the clocksource core
 * (registration used a dummy frequency; see
 * sh_cmt_register_clocksource()).
 */
457 static int sh_cmt_clocksource_enable(
struct clocksource *cs)
468 __clocksource_updatefreq_hz(cs, p->
rate);
/* sh_cmt_clocksource_disable() - clocksource ->disable() hook (body
 * missing from this extract). */
484 static void sh_cmt_clocksource_disable(
struct clocksource *cs)
/* sh_cmt_clocksource_suspend() - power the channel's PM domain off
 * across system suspend (genpd syscore helper). */
484 static void sh_cmt_clocksource_suspend(
struct clocksource *cs)
489 pm_genpd_syscore_poweroff(&p->
pdev->dev);
/* sh_cmt_clocksource_resume() - counterpart of the suspend hook:
 * power the PM domain back on at resume. */
492 static void sh_cmt_clocksource_resume(
struct clocksource *cs)
496 pm_genpd_syscore_poweron(&p->
pdev->dev);
/*
 * sh_cmt_register_clocksource() - fill in the clocksource ops for this
 * channel and register it with the clocksource core.
 */
500 static int sh_cmt_register_clocksource(
struct sh_cmt_priv *p,
501 char *
name,
unsigned long rating)
507 cs->
read = sh_cmt_clocksource_read;
508 cs->
enable = sh_cmt_clocksource_enable;
509 cs->
disable = sh_cmt_clocksource_disable;
510 cs->
suspend = sh_cmt_clocksource_suspend;
511 cs->
resume = sh_cmt_clocksource_resume;
/* Register with a dummy 1 Hz frequency; the real rate is only known
 * after enable and is pushed via __clocksource_updatefreq_hz() in
 * sh_cmt_clocksource_enable(). */
518 clocksource_register_hz(cs, 1);
/* ced_to_sh_cmt() - map a clock_event_device back to its containing
 * private struct (container_of-style; body missing from this extract). */
522 static struct sh_cmt_priv *ced_to_sh_cmt(
struct clock_event_device *
ced)
/*
 * sh_cmt_clock_event_start() - start the channel for clockevent use;
 * @periodic selects periodic vs. oneshot behaviour.
 */
527 static void sh_cmt_clock_event_start(
struct sh_cmt_priv *p,
int periodic)
529 struct clock_event_device *ced = &p->
ced;
/* Periodic path: program one HZ period, rounding rate/HZ to nearest
 * and subtracting 1 because the match fires when the counter reaches
 * the programmed value. */
541 sh_cmt_set_next(p, ((p->
rate +
HZ/2) /
HZ) - 1);
/*
 * sh_cmt_clock_event_mode() - clockevent ->set_mode() hook.
 * First switch (553/554) presumably tears down a previous
 * periodic/oneshot setting before applying the new one — the
 * statements between the cases are missing from this extract.
 */
546 static void sh_cmt_clock_event_mode(
enum clock_event_mode
mode,
547 struct clock_event_device *ced)
553 case CLOCK_EVT_MODE_PERIODIC:
554 case CLOCK_EVT_MODE_ONESHOT:
/* Second switch: apply the requested mode. */
562 case CLOCK_EVT_MODE_PERIODIC:
563 dev_info(&p->
pdev->dev,
"used for periodic clock events\n");
564 sh_cmt_clock_event_start(p, 1);
566 case CLOCK_EVT_MODE_ONESHOT:
567 dev_info(&p->
pdev->dev,
"used for oneshot clock events\n");
568 sh_cmt_clock_event_start(p, 0);
/* Shutdown/unused: stop the channel (handling in missing lines). */
570 case CLOCK_EVT_MODE_SHUTDOWN:
571 case CLOCK_EVT_MODE_UNUSED:
/*
 * sh_cmt_clock_event_next() - clockevent ->set_next_event() hook;
 * only valid in oneshot mode. delta - 1 because the compare-match
 * fires when the counter reaches the programmed value.
 */
579 static int sh_cmt_clock_event_next(
unsigned long delta,
580 struct clock_event_device *ced)
584 BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
588 sh_cmt_set_next(p, delta - 1);
/* sh_cmt_clock_event_suspend() - clockevent ->suspend() hook: power
 * the channel's PM domain off via the genpd syscore helper. */
593 static void sh_cmt_clock_event_suspend(
struct clock_event_device *ced)
595 pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->
pdev->dev);
/* sh_cmt_clock_event_resume() - counterpart of the suspend hook:
 * power the PM domain back on at resume. */
598 static void sh_cmt_clock_event_resume(
struct clock_event_device *ced)
600 pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->
pdev->dev);
/*
 * sh_cmt_register_clockevent() - initialize the embedded
 * clock_event_device and register it (the clockevents_register_device
 * call is in the missing lines — TODO confirm).
 */
603 static void sh_cmt_register_clockevent(
struct sh_cmt_priv *p,
604 char *name,
unsigned long rating)
606 struct clock_event_device *ced = &p->
ced;
/* Start from a clean slate before filling in the fields. */
608 memset(ced, 0,
sizeof(*ced));
/* The channel supports both periodic and oneshot operation. */
611 ced->features = CLOCK_EVT_FEAT_PERIODIC;
612 ced->features |= CLOCK_EVT_FEAT_ONESHOT;
613 ced->rating = rating;
615 ced->set_next_event = sh_cmt_clock_event_next;
616 ced->set_mode = sh_cmt_clock_event_mode;
617 ced->suspend = sh_cmt_clock_event_suspend;
618 ced->resume = sh_cmt_clock_event_resume;
/*
 * sh_cmt_register() - register the channel with whichever frameworks
 * were requested: a non-zero rating enables the corresponding role.
 */
624 static int sh_cmt_register(
struct sh_cmt_priv *p,
char *name,
625 unsigned long clockevent_rating,
626 unsigned long clocksource_rating)
636 if (clockevent_rating)
637 sh_cmt_register_clockevent(p, name, clockevent_rating);
639 if (clocksource_rating)
640 sh_cmt_register_clocksource(p, name, clocksource_rating);
/*
 * Device setup fragment (function head missing — presumably
 * sh_cmt_setup()): resolve platform resources, map registers, get the
 * clock, derive counter width, and register the channel.
 */
660 platform_set_drvdata(pdev, p);
/* Missing I/O memory resource is fatal. */
664 dev_err(&p->
pdev->dev,
"failed to get I/O memory\n");
677 dev_err(&p->
pdev->dev,
"failed to remap I/O memory\n");
/* Clock lookup failure. */
690 if (IS_ERR(p->
clk)) {
692 ret = PTR_ERR(p->
clk);
/* A 6-byte register window implies the 16-bit counter variant —
 * NOTE(review): inferred from the width==16 branch in the enable
 * path; confirm against the full source. */
696 if (resource_size(res) == 6) {
706 ret = sh_cmt_register(p, (
char *)dev_name(&p->
pdev->dev),
717 dev_err(&p->
pdev->dev,
"failed to request irq %d\n", irq);
/*
 * Probe-path fragment (function head missing — presumably
 * sh_cmt_probe()): allocate/fetch driver data, run setup, and manage
 * runtime-PM state on the possible outcomes.
 */
731 struct sh_cmt_priv *p = platform_get_drvdata(pdev);
/* Early platform devices run before runtime PM is usable; only mark
 * the device active for the normal probe path. */
735 if (!is_early_platform_device(pdev)) {
736 pm_runtime_set_active(&pdev->
dev);
747 dev_err(&pdev->
dev,
"failed to allocate driver data\n");
751 ret = sh_cmt_setup(p, pdev);
/* Setup failed: clear drvdata and let the device idle. */
754 platform_set_drvdata(pdev,
NULL);
755 pm_runtime_idle(&pdev->
dev);
758 if (is_early_platform_device(pdev))
/* Success path without clockevent/clocksource use also idles the
 * device (context in missing lines — TODO confirm). */
765 pm_runtime_idle(&pdev->
dev);
/* platform_driver initializer fragment (struct head/tail missing). */
776 .probe = sh_cmt_probe,
/* Module init: registers the platform driver (body missing from this
 * extract). */
783 static int __init sh_cmt_init(
void)
/* Module exit: unregisters the platform driver (body missing from
 * this extract). */
788 static void __exit sh_cmt_exit(
void)