PageRenderTime 47ms CodeModel.GetById 15ms RepoModel.GetById 1ms app.codeStats 0ms

/kernel/time/ntp.c

https://gitlab.com/LiquidSmooth-Devices/android_kernel_htc_msm8974
C | 793 lines | 574 code | 200 blank | 19 comment | 93 complexity | 839700e6edbad57b2f6c0860138ea500 MD5 | raw file
Possible License(s): GPL-2.0
  1. /*
  2. * NTP state machine interfaces and logic.
  3. *
  4. * This code was mainly moved from kernel/timer.c and kernel/time.c
  5. * Please see those files for relevant copyright info and historical
  6. * changelogs.
  7. */
  8. #include <linux/capability.h>
  9. #include <linux/clocksource.h>
  10. #include <linux/workqueue.h>
  11. #include <linux/hrtimer.h>
  12. #include <linux/jiffies.h>
  13. #include <linux/math64.h>
  14. #include <linux/timex.h>
  15. #include <linux/time.h>
  16. #include <linux/mm.h>
  17. #include <linux/module.h>
  18. #include "tick-internal.h"
/* Protects all of the NTP state below (and the PPS state further down). */
DEFINE_SPINLOCK(ntp_lock);

/* USER_HZ period (usecs): */
unsigned long tick_usec = TICK_USEC;

/* HZ period (nsecs), recomputed in ntp_update_frequency(): */
unsigned long tick_nsec;

/* Current NTP interval length in shifted nsecs (NTP_SCALE_SHIFT): */
static u64 tick_length;
static u64 tick_length_base;

/* Maximum adjtime() slew applied per second (usecs): */
#define MAX_TICKADJ 500LL
/* Same limit, pre-scaled to shifted nsecs per NTP interval: */
#define MAX_TICKADJ_SCALED \
	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)

/*
 * Phase/frequency-lock loop (PLL/FLL) state:
 */

/* clock discipline state machine (TIME_OK, TIME_INS, ...): */
static int time_state = TIME_OK;

/* clock status bits (STA_*): */
static int time_status = STA_UNSYNC;

/* TAI - UTC offset (secs): */
static long time_tai;

/* remaining time offset still to be applied, in shifted nsecs: */
static s64 time_offset;

/* PLL time constant (shift): */
static long time_constant = 2;

/* maximum error (usecs): */
static long time_maxerror = NTP_PHASE_LIMIT;

/* estimated error (usecs): */
static long time_esterror = NTP_PHASE_LIMIT;

/* frequency offset (scaled nsecs/secs): */
static s64 time_freq;

/* time of last adjustment (secs), used for PLL/FLL interval: */
static long time_reftime;

/* remaining adjtime() correction (usecs): */
static long time_adjust;

/* boot-parameter tick adjustment, pre-shifted by NTP_SCALE_SHIFT: */
static s64 ntp_tick_adj;
#ifdef CONFIG_NTP_PPS

/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID	10	/* PPS signal watchdog max (s) */
#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
				   increase pps_shift or consecutive bad
				   intervals to decrease it */
#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */

static int pps_valid;		/* signal watchdog counter */
static long pps_tf[3];		/* phase median filter */
static long pps_jitter;		/* current jitter (ns) */
static struct timespec pps_fbase; /* beginning of the last freq interval */
static int pps_shift;		/* current interval duration (s) (shift) */
static int pps_intcnt;		/* interval counter */
static s64 pps_freq;		/* frequency offset (scaled ns/s) */
static long pps_stabil;		/* current stability (scaled ns/s) */

/*
 * PPS signal quality monitors:
 */
static long pps_calcnt;		/* calibration intervals */
static long pps_jitcnt;		/* jitter limit exceeded */
static long pps_stbcnt;		/* stability limit exceeded */
static long pps_errcnt;		/* calibration errors */
  57. static inline s64 ntp_offset_chunk(s64 offset)
  58. {
  59. if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
  60. return offset;
  61. else
  62. return shift_right(offset, SHIFT_PLL + time_constant);
  63. }
/*
 * pps_reset_freq_interval - restart frequency calibration at the minimum
 * interval length and zero the good/bad interval counter.
 */
static inline void pps_reset_freq_interval(void)
{
	pps_shift = PPS_INTMIN;
	pps_intcnt = 0;
}
  69. static inline void pps_clear(void)
  70. {
  71. pps_reset_freq_interval();
  72. pps_tf[0] = 0;
  73. pps_tf[1] = 0;
  74. pps_tf[2] = 0;
  75. pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
  76. pps_freq = 0;
  77. }
  78. static inline void pps_dec_valid(void)
  79. {
  80. if (pps_valid > 0)
  81. pps_valid--;
  82. else {
  83. time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
  84. STA_PPSWANDER | STA_PPSERROR);
  85. pps_clear();
  86. }
  87. }
/* pps_set_freq - seed the PPS frequency estimate (scaled ns/s),
 * used when adjtimex(ADJ_FREQUENCY) sets a new time_freq. */
static inline void pps_set_freq(s64 freq)
{
	pps_freq = freq;
}
  92. static inline int is_error_status(int status)
  93. {
  94. return (time_status & (STA_UNSYNC|STA_CLOCKERR))
  95. || ((time_status & (STA_PPSFREQ|STA_PPSTIME))
  96. && !(time_status & STA_PPSSIGNAL))
  97. || ((time_status & (STA_PPSTIME|STA_PPSJITTER))
  98. == (STA_PPSTIME|STA_PPSJITTER))
  99. || ((time_status & STA_PPSFREQ)
  100. && (time_status & (STA_PPSWANDER|STA_PPSERROR)));
  101. }
  102. static inline void pps_fill_timex(struct timex *txc)
  103. {
  104. txc->ppsfreq = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
  105. PPM_SCALE_INV, NTP_SCALE_SHIFT);
  106. txc->jitter = pps_jitter;
  107. if (!(time_status & STA_NANO))
  108. txc->jitter /= NSEC_PER_USEC;
  109. txc->shift = pps_shift;
  110. txc->stabil = pps_stabil;
  111. txc->jitcnt = pps_jitcnt;
  112. txc->calcnt = pps_calcnt;
  113. txc->errcnt = pps_errcnt;
  114. txc->stbcnt = pps_stbcnt;
  115. }
#else /* !CONFIG_NTP_PPS */

/* Without PPS the phase chunk is always damped by the PLL constant. */
static inline s64 ntp_offset_chunk(s64 offset)
{
	return shift_right(offset, SHIFT_PLL + time_constant);
}

/* No-op stubs so callers need no #ifdefs of their own: */
static inline void pps_reset_freq_interval(void) {}
static inline void pps_clear(void) {}
static inline void pps_dec_valid(void) {}
static inline void pps_set_freq(s64 freq) {}

/* Only the basic sync/clock-error bits can signal TIME_ERROR here. */
static inline int is_error_status(int status)
{
	return status & (STA_UNSYNC|STA_CLOCKERR);
}

/* PPS is not implemented, so report zeroed statistics: */
static inline void pps_fill_timex(struct timex *txc)
{
	txc->ppsfreq = 0;
	txc->jitter = 0;
	txc->shift = 0;
	txc->stabil = 0;
	txc->jitcnt = 0;
	txc->calcnt = 0;
	txc->errcnt = 0;
	txc->stbcnt = 0;
}

#endif /* CONFIG_NTP_PPS */
  141. static inline int ntp_synced(void)
  142. {
  143. return !(time_status & STA_UNSYNC);
  144. }
  145. static void ntp_update_frequency(void)
  146. {
  147. u64 second_length;
  148. u64 new_base;
  149. second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
  150. << NTP_SCALE_SHIFT;
  151. second_length += ntp_tick_adj;
  152. second_length += time_freq;
  153. tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
  154. new_base = div_u64(second_length, NTP_INTERVAL_FREQ);
  155. tick_length += new_base - tick_length_base;
  156. tick_length_base = new_base;
  157. }
  158. static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
  159. {
  160. time_status &= ~STA_MODE;
  161. if (secs < MINSEC)
  162. return 0;
  163. if (!(time_status & STA_FLL) && (secs <= MAXSEC))
  164. return 0;
  165. time_status |= STA_MODE;
  166. return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
  167. }
  168. static void ntp_update_offset(long offset)
  169. {
  170. s64 freq_adj;
  171. s64 offset64;
  172. long secs;
  173. if (!(time_status & STA_PLL))
  174. return;
  175. if (!(time_status & STA_NANO))
  176. offset *= NSEC_PER_USEC;
  177. offset = min(offset, MAXPHASE);
  178. offset = max(offset, -MAXPHASE);
  179. secs = get_seconds() - time_reftime;
  180. if (unlikely(time_status & STA_FREQHOLD))
  181. secs = 0;
  182. time_reftime = get_seconds();
  183. offset64 = offset;
  184. freq_adj = ntp_update_offset_fll(offset64, secs);
  185. if (unlikely(secs > 1 << (SHIFT_PLL + 1 + time_constant)))
  186. secs = 1 << (SHIFT_PLL + 1 + time_constant);
  187. freq_adj += (offset64 * secs) <<
  188. (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
  189. freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED);
  190. time_freq = max(freq_adj, -MAXFREQ_SCALED);
  191. time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
  192. }
  193. void ntp_clear(void)
  194. {
  195. unsigned long flags;
  196. spin_lock_irqsave(&ntp_lock, flags);
  197. time_adjust = 0;
  198. time_status |= STA_UNSYNC;
  199. time_maxerror = NTP_PHASE_LIMIT;
  200. time_esterror = NTP_PHASE_LIMIT;
  201. ntp_update_frequency();
  202. tick_length = tick_length_base;
  203. time_offset = 0;
  204. pps_clear();
  205. spin_unlock_irqrestore(&ntp_lock, flags);
  206. }
  207. u64 ntp_tick_length(void)
  208. {
  209. unsigned long flags;
  210. s64 ret;
  211. spin_lock_irqsave(&ntp_lock, flags);
  212. ret = tick_length;
  213. spin_unlock_irqrestore(&ntp_lock, flags);
  214. return ret;
  215. }
/*
 * second_overflow - process one second of accumulated NTP state
 * @secs: current time-of-day in seconds (drives leap second scheduling)
 *
 * Runs the leap-second state machine, ages the maximum error estimate,
 * folds this second's share of time_offset into tick_length and applies
 * up to MAX_TICKADJ usecs of any pending adjtime() slew.
 *
 * Returns the leap second delta to apply to the clock: -1 to insert a
 * leap second, +1 to delete one, 0 otherwise.  Takes ntp_lock itself.
 */
int second_overflow(unsigned long secs)
{
	s64 delta;
	int leap = 0;
	unsigned long flags;

	spin_lock_irqsave(&ntp_lock, flags);

	/*
	 * Leap second processing. If in leap-insert state at the end of
	 * the day, the system clock is set back one second; if in
	 * leap-delete state, the system clock is set ahead one second.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;
	case TIME_INS:
		if (secs % 86400 == 0) {	/* midnight UTC */
			leap = -1;
			time_state = TIME_OOP;
			time_tai++;
			printk(KERN_NOTICE
				"Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if ((secs + 1) % 86400 == 0) {	/* last second of the day */
			leap = 1;
			time_tai--;
			time_state = TIME_WAIT;
			printk(KERN_NOTICE
				"Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
		break;
	}

	/* Bump the maxerror field; saturate and flag unsynchronized. */
	time_maxerror += MAXFREQ / NSEC_PER_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/* Compute the phase adjustment for the next second: */
	tick_length	 = tick_length_base;

	delta		 = ntp_offset_chunk(time_offset);
	time_offset	-= delta;
	tick_length	+= delta;

	/* Check PPS signal watchdog: */
	pps_dec_valid();

	if (!time_adjust)
		goto out;

	/* adjtime() slew: apply at most MAX_TICKADJ usecs this second. */
	if (time_adjust > MAX_TICKADJ) {
		time_adjust -= MAX_TICKADJ;
		tick_length += MAX_TICKADJ_SCALED;
		goto out;
	}

	if (time_adjust < -MAX_TICKADJ) {
		time_adjust += MAX_TICKADJ;
		tick_length -= MAX_TICKADJ_SCALED;
		goto out;
	}

	/* Remainder fits in one second: apply it all and stop slewing. */
	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
							<< NTP_SCALE_SHIFT;
	time_adjust = 0;

out:
	spin_unlock_irqrestore(&ntp_lock, flags);

	return leap;
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE

static void sync_cmos_clock(struct work_struct *work);

static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);

/*
 * Periodically write the NTP-disciplined system time back to the
 * persistent (CMOS/RTC) clock while the system is synchronized.
 * update_persistent_clock() must be called as close as possible to
 * 500 ms before the new second starts; on success the next write is
 * scheduled roughly 11 minutes (659 s + fraction) out, on failure it
 * retries near the next second boundary.
 */
static void sync_cmos_clock(struct work_struct *work)
{
	struct timespec now, next;
	int fail = 1;

	if (!ntp_synced()) {
		/*
		 * Not synced: exit without rescheduling (if a timer is
		 * already pending, let it run out).
		 */
		return;
	}

	getnstimeofday(&now);
	/* Only write when we are within half a tick of the 500 ms mark. */
	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
		fail = update_persistent_clock(now);

	/* Aim the next attempt at the half-second boundary... */
	next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
	if (next.tv_nsec <= 0)
		next.tv_nsec += NSEC_PER_SEC;

	/* ...~11 minutes out on success, next second on failure: */
	if (!fail)
		next.tv_sec = 659;
	else
		next.tv_sec = 0;

	/* Renormalize the nsec field after the adjustments above. */
	if (next.tv_nsec >= NSEC_PER_SEC) {
		next.tv_sec++;
		next.tv_nsec -= NSEC_PER_SEC;
	}
	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
}

/* Kick off an immediate CMOS sync attempt (called after adjtimex()). */
static void notify_cmos_timer(void)
{
	schedule_delayed_work(&sync_cmos_work, 0);
}

#else
static inline void notify_cmos_timer(void) { }
#endif
  317. static inline void process_adj_status(struct timex *txc, struct timespec *ts)
  318. {
  319. if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
  320. time_state = TIME_OK;
  321. time_status = STA_UNSYNC;
  322. pps_reset_freq_interval();
  323. }
  324. if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
  325. time_reftime = get_seconds();
  326. time_status &= STA_RONLY;
  327. time_status |= txc->status & ~STA_RONLY;
  328. }
/*
 * Apply the mode bits of an adjtimex() request to the NTP state.
 * Caller holds ntp_lock.  Note the ordering dependencies below:
 * ADJ_TIMECONST is handled before ADJ_TAI (both read txc->constant),
 * and ADJ_OFFSET/ADJ_FREQUENCY/ADJ_TICK precede the final
 * ntp_update_frequency() recalculation.
 */
static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
{
	if (txc->modes & ADJ_STATUS)
		process_adj_status(txc, ts);

	/* Select the resolution of the offset/jitter fields: */
	if (txc->modes & ADJ_NANO)
		time_status |= STA_NANO;

	if (txc->modes & ADJ_MICRO)
		time_status &= ~STA_NANO;

	if (txc->modes & ADJ_FREQUENCY) {
		/* txc->freq is scaled ppm; clamp to +/- MAXFREQ. */
		time_freq = txc->freq * PPM_SCALE;
		time_freq = min(time_freq, MAXFREQ_SCALED);
		time_freq = max(time_freq, -MAXFREQ_SCALED);
		/* keep the PPS frequency estimate in step */
		pps_set_freq(time_freq);
	}

	if (txc->modes & ADJ_MAXERROR)
		time_maxerror = txc->maxerror;

	if (txc->modes & ADJ_ESTERROR)
		time_esterror = txc->esterror;

	if (txc->modes & ADJ_TIMECONST) {
		time_constant = txc->constant;
		/* usec-resolution callers get a larger PLL constant: */
		if (!(time_status & STA_NANO))
			time_constant += 4;
		time_constant = min(time_constant, (long)MAXTC);
		time_constant = max(time_constant, 0l);
	}

	/* ADJ_TAI reuses the constant field to carry the new TAI offset: */
	if (txc->modes & ADJ_TAI && txc->constant > 0)
		time_tai = txc->constant;

	if (txc->modes & ADJ_OFFSET)
		ntp_update_offset(txc->offset);

	if (txc->modes & ADJ_TICK)
		tick_usec = txc->tick;

	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
		ntp_update_frequency();
}
/*
 * do_adjtimex - implement the adjtimex() syscall semantics
 * @txc:	timex request/result buffer (already copied in from user)
 *
 * Validates the request, applies the adjustments to the NTP state under
 * ntp_lock, and fills @txc with the resulting state for copy-out.
 *
 * Returns the clock state (TIME_OK, TIME_INS, ...), TIME_ERROR when
 * is_error_status() flags a fault, or -EINVAL/-EPERM (or the
 * timekeeping_inject_offset() error) for bad or unprivileged requests.
 */
int do_adjtimex(struct timex *txc)
{
	struct timespec ts;
	int result;

	/* Validate the data before disabling interrupts: */
	if (txc->modes & ADJ_ADJTIME) {
		/* singleshot must not be used with any other mode bits */
		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
			return -EINVAL;
		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
		    !capable(CAP_SYS_TIME))
			return -EPERM;
	} else {
		/* Modifying anything requires CAP_SYS_TIME: */
		if (txc->modes && !capable(CAP_SYS_TIME))
			return -EPERM;

		/* If the quartz is off by more than 10%, something is
		 * very wrong - reject the tick value: */
		if (txc->modes & ADJ_TICK &&
		    (txc->tick < 900000/USER_HZ ||
		     txc->tick > 1100000/USER_HZ))
			return -EINVAL;
	}

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!capable(CAP_SYS_TIME))
			return -EPERM;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;	/* usecs -> nsecs */
		result = timekeeping_inject_offset(&delta);
		if (result)
			return result;
	}

	getnstimeofday(&ts);

	spin_lock_irq(&ntp_lock);

	if (txc->modes & ADJ_ADJTIME) {
		long save_adjust = time_adjust;

		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
			/* adjtime() is independent from ntp_adjtime() */
			time_adjust = txc->offset;
			ntp_update_frequency();
		}
		/* report the previously remaining slew: */
		txc->offset = save_adjust;
	} else {

		/* If there are input parameters, then process them: */
		if (txc->modes)
			process_adjtimex_modes(txc, &ts);

		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
				NTP_SCALE_SHIFT);
		if (!(time_status & STA_NANO))
			txc->offset /= NSEC_PER_USEC;
	}

	result = time_state;	/* mostly TIME_OK */
	if (is_error_status(time_status))
		result = TIME_ERROR;

	/* Copy the current state out for the caller: */
	txc->freq	= shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
					PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->maxerror	= time_maxerror;
	txc->esterror	= time_esterror;
	txc->status	= time_status;
	txc->constant	= time_constant;
	txc->precision	= 1;
	txc->tolerance	= MAXFREQ_SCALED / PPM_SCALE;
	txc->tick	= tick_usec;
	txc->tai	= time_tai;

	/* fill PPS status fields */
	pps_fill_timex(txc);

	spin_unlock_irq(&ntp_lock);

	/* Report the REALTIME timestamp sampled before taking the lock: */
	txc->time.tv_sec = ts.tv_sec;
	txc->time.tv_usec = ts.tv_nsec;
	if (!(time_status & STA_NANO))
		txc->time.tv_usec /= NSEC_PER_USEC;

	notify_cmos_timer();

	return result;
}
#ifdef CONFIG_NTP_PPS

/*
 * struct pps_normtime is basically a struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC).
 */
struct pps_normtime {
	__kernel_time_t sec;	/* seconds */
	long nsec;		/* nanoseconds */
};

/* Normalize a timestamp so that nsec lies in the
 * ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval. */
static inline struct pps_normtime pps_normalize_ts(struct timespec ts)
{
	struct pps_normtime norm = {
		.sec = ts.tv_sec,
		.nsec = ts.tv_nsec
	};

	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
		norm.nsec -= NSEC_PER_SEC;
		norm.sec++;
	}

	return norm;
}
  449. static inline long pps_phase_filter_get(long *jitter)
  450. {
  451. *jitter = pps_tf[0] - pps_tf[1];
  452. if (*jitter < 0)
  453. *jitter = -*jitter;
  454. return pps_tf[0];
  455. }
  456. static inline void pps_phase_filter_add(long err)
  457. {
  458. pps_tf[2] = pps_tf[1];
  459. pps_tf[1] = pps_tf[0];
  460. pps_tf[0] = err;
  461. }
  462. static inline void pps_dec_freq_interval(void)
  463. {
  464. if (--pps_intcnt <= -PPS_INTCOUNT) {
  465. pps_intcnt = -PPS_INTCOUNT;
  466. if (pps_shift > PPS_INTMIN) {
  467. pps_shift--;
  468. pps_intcnt = 0;
  469. }
  470. }
  471. }
  472. static inline void pps_inc_freq_interval(void)
  473. {
  474. if (++pps_intcnt >= PPS_INTCOUNT) {
  475. pps_intcnt = PPS_INTCOUNT;
  476. if (pps_shift < PPS_INTMAX) {
  477. pps_shift++;
  478. pps_intcnt = 0;
  479. }
  480. }
  481. }
/*
 * hardpps_update_freq - update the PPS frequency estimate from one
 * completed calibration interval.
 * @freq_norm:	normalized (sec, nsec) duration error of the interval
 *
 * Returns the observed frequency delta (ns/s), or 0 if the interval
 * was discarded as too long.  Caller holds ntp_lock.
 */
static long hardpps_update_freq(struct pps_normtime freq_norm)
{
	long delta, delta_mod;
	s64 ftemp;

	/* check if the frequency interval was too long */
	if (freq_norm.sec > (2 << pps_shift)) {
		time_status |= STA_PPSERROR;
		pps_errcnt++;
		pps_dec_freq_interval();
		pr_err("hardpps: PPSERROR: interval too long - %ld s\n",
				freq_norm.sec);
		return 0;
	}

	/*
	 * Here the raw frequency offset and wander (stability) is
	 * calculated. If the wander is less than the wander threshold
	 * the interval is increased; otherwise it is decreased.
	 */
	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
			freq_norm.sec);
	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
	pps_freq = ftemp;
	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
		pr_warning("hardpps: PPSWANDER: change=%ld\n", delta);
		time_status |= STA_PPSWANDER;
		pps_stbcnt++;
		pps_dec_freq_interval();
	} else {
		/* good sample */
		pps_inc_freq_interval();
	}

	/*
	 * The stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance monitoring.
	 */
	delta_mod = delta;
	if (delta_mod < 0)
		delta_mod = -delta_mod;
	pps_stabil += (div_s64(((s64)delta_mod) <<
				(NTP_SCALE_SHIFT - SHIFT_USEC),
				NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;

	/* if enabled, the system clock frequency is updated */
	if ((time_status & STA_PPSFREQ) != 0 &&
	    (time_status & STA_FREQHOLD) == 0) {
		time_freq = pps_freq;
		ntp_update_frequency();
	}

	return delta;
}
/*
 * hardpps_update_phase - correct the clock phase against the PPS pulse.
 * @error:	phase error of the pulse (nsecs from the second boundary)
 *
 * Feeds the error through the 3-sample phase filter, rejects "popcorn"
 * spikes larger than 2^PPS_POPCORN times the running jitter, and (when
 * STA_PPSTIME is set) loads the correction into time_offset.
 * Caller holds ntp_lock.
 */
static void hardpps_update_phase(long error)
{
	long correction = -error;
	long jitter;

	/* add the sample to the phase filter */
	pps_phase_filter_add(correction);
	correction = pps_phase_filter_get(&jitter);

	/*
	 * Nominal jitter is due to PPS signal noise. If it exceeds the
	 * threshold, the sample is discarded; otherwise, if so enabled,
	 * the time offset is updated.
	 */
	if (jitter > (pps_jitter << PPS_POPCORN)) {
		pr_warning("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
		       jitter, (pps_jitter << PPS_POPCORN));
		time_status |= STA_PPSJITTER;
		pps_jitcnt++;
	} else if (time_status & STA_PPSTIME) {
		/* correct the time using the phase offset */
		time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
				NTP_INTERVAL_FREQ);
		/* cancel any running adjtime() */
		time_adjust = 0;
	}
	/* update jitter (exponential average, gain 2^-PPS_INTMIN) */
	pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
}
  537. /*
  538. * hardpps() - discipline CPU clock oscillator to external PPS signal
  539. *
  540. * This routine is called at each PPS signal arrival in order to
  541. * discipline the CPU clock oscillator to the PPS signal. It takes two
  542. * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
  543. * is used to correct clock phase error and the latter is used to
  544. * correct the frequency.
  545. *
  546. * This code is based on David Mills's reference nanokernel
  547. * implementation. It was mostly rewritten but keeps the same idea.
  548. */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
	struct pps_normtime pts_norm, freq_norm;
	unsigned long flags;

	pts_norm = pps_normalize_ts(*phase_ts);

	spin_lock_irqsave(&ntp_lock, flags);

	/* clear the error bits, they will be set again if needed */
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

	/* indicate signal presence and re-arm the watchdog */
	time_status |= STA_PPSSIGNAL;
	pps_valid = PPS_VALID;

	/* when called for the first time,
	 * just start the frequency interval */
	if (unlikely(pps_fbase.tv_sec == 0)) {
		pps_fbase = *raw_ts;
		spin_unlock_irqrestore(&ntp_lock, flags);
		return;
	}

	/* ok, now we have a base for frequency calculation */
	freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase));

	/* check that the signal is in the range
	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
	if ((freq_norm.sec == 0) ||
			(freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
			(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
		time_status |= STA_PPSJITTER;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		spin_unlock_irqrestore(&ntp_lock, flags);
		pr_err("hardpps: PPSJITTER: bad pulse\n");
		return;
	}

	/* signal is ok - check if the current frequency interval
	 * is finished */
	if (freq_norm.sec >= (1 << pps_shift)) {
		pps_calcnt++;
		/* restart the frequency calibration interval */
		pps_fbase = *raw_ts;
		hardpps_update_freq(freq_norm);
	}

	hardpps_update_phase(pts_norm.nsec);

	spin_unlock_irqrestore(&ntp_lock, flags);
}
EXPORT_SYMBOL(hardpps);

#endif	/* CONFIG_NTP_PPS */
/*
 * Parse the "ntp_tick_adj=" boot parameter: a signed per-interval tick
 * adjustment, stored pre-shifted by NTP_SCALE_SHIFT for use in
 * ntp_update_frequency().
 */
static int __init ntp_tick_adj_setup(char *str)
{
	ntp_tick_adj = simple_strtol(str, NULL, 0);
	ntp_tick_adj <<= NTP_SCALE_SHIFT;

	return 1;
}

__setup("ntp_tick_adj=", ntp_tick_adj_setup);
/* ntp_init - boot-time initialization: reset all NTP discipline state. */
void __init ntp_init(void)
{
	ntp_clear();
}