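ntp-4.2.4p7-freqmode.patch

Compute the initial frequency estimate directly instead of accumulating it on top of
the existing drift. local_clock() now calls a new helper, direct_freq(), in the spike
and S_FREQ paths; when the kernel discipline is active (pll_control && kern_enable)
the helper reads the residual offset and frequency back with ntp_adjtime() and returns

    clock_frequency = (fp_offset - clock_offset) / mu + drift_comp

Because drift_comp is already folded into clock_frequency, it is no longer added a
second time when ntv.freq is programmed (the 65536e6 factor converts between a
dimensionless frequency and the kernel's ppm-with-16-bit-fraction units). In the
S_FREQ case rstclock() is now called with a zero offset and, as the updated comment
notes, the phase and frequency are not adjusted until the next update. A small worked
example of the arithmetic follows the diff.
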
diff -up ntp-4.2.4p7/ntpd/ntp_loopfilter.c.freqmode ntp-4.2.4p7/ntpd/ntp_loopfilter.c
--- ntp-4.2.4p7/ntpd/ntp_loopfilter.c.freqmode  2009-05-28 15:19:30.000000000 +0200
+++ ntp-4.2.4p7/ntpd/ntp_loopfilter.c   2009-05-28 17:21:30.000000000 +0200
@@ -133,6 +133,7 @@ u_long      sys_clocktime;          /* last system cl
 u_long pps_control;            /* last pps update */
 u_long sys_tai;                /* UTC offset from TAI (s) */
 static void rstclock P((int, u_long, double)); /* transition function */
+static double direct_freq(double, u_long); /* calculate frequency directly */
 
 #ifdef KERNEL_PLL
 struct timex ntv;              /* kernel API parameters */
@@ -359,8 +360,7 @@ local_clock(
                        if (mu < clock_minstep)
                                return (0);
 
-                       clock_frequency = (fp_offset - clock_offset) /
-                           mu;
+                       clock_frequency = direct_freq(fp_offset, mu);
 
                        /* fall through to S_SPIK */
 
@@ -451,16 +451,16 @@ local_clock(
 
                /*
                 * In S_FREQ state ignore updates until the stepout
-                * threshold. After that, correct the phase and
-                * frequency and switch to S_SYNC state.
+                * threshold. After that, compute the new frequency, but
+                * do not adjust the phase or frequency until the next
+                * update.
                 */
                case S_FREQ:
                        if (mu < clock_minstep)
                                return (0);
 
-                       clock_frequency = (fp_offset - clock_offset) /
-                           mu;
-                       rstclock(S_SYNC, peer->epoch, fp_offset);
+                       clock_frequency = direct_freq(fp_offset, mu);
+                       rstclock(S_SYNC, peer->epoch, 0);
                        break;
 
                /*
@@ -590,8 +590,7 @@ local_clock(
                         */
                        if (clock_frequency != 0) {
                                ntv.modes |= MOD_FREQUENCY;
-                               ntv.freq = (int32)((clock_frequency +
-                                   drift_comp) * 65536e6);
+                               ntv.freq = (int32)(clock_frequency * 65536e6);
                        }
                        ntv.esterror = (u_int32)(clock_jitter * 1e6);
                        ntv.maxerror = (u_int32)((sys_rootdelay / 2 +
@@ -837,6 +836,43 @@ rstclock(
        last_offset = clock_offset = offset;
 }
 
+/*
+ * direct_freq - calculate frequency directly
+ *
+ * This is very carefully done. When the offset is first computed at the
+ * first update, a residual frequency component results. Subsequently,
+ * updates are suppressed until the end of the measurement interval while
+ * the offset is amortized. At the end of the interval the frequency is
+ * calculated from the current offset, residual offset, length of the
+ * interval and residual frequency component. At the same time the
+ * frequency file is armed for update at the next hourly stats.
+ */
+static double
+direct_freq(
+       double  fp_offset,
+       u_long mu
+       )
+{
+
+#ifdef KERNEL_PLL
+       /*
+        * If the kernel is enabled, we need the residual offset to
+        * calculate the frequency correction.
+        */
+       if (pll_control && kern_enable) {
+               memset(&ntv,  0, sizeof(ntv));
+               ntp_adjtime(&ntv);
+#ifdef STA_NANO
+               clock_offset = ntv.offset / 1e9;
+#else /* STA_NANO */
+               clock_offset = ntv.offset / 1e6;
+#endif /* STA_NANO */
+               drift_comp = ntv.freq / 65536e6;
+       }
+#endif /* KERNEL_PLL */
+       return (fp_offset - clock_offset) / mu + drift_comp;
+}
+
 
 int huffpuff_enabled()
 {
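
The arithmetic above can be sanity-checked in isolation. The following standalone C
program is illustrative only and not part of the patch: the variable names mirror
ntp_loopfilter.c, but the ntp_adjtime() readback is replaced by made-up sample values,
so it builds with a plain C compiler and no NTP or kernel headers.

/* Illustration only, not part of the patch: the direct_freq() arithmetic
 * with sample numbers standing in for the ntp_adjtime() readback. */
#include <stdio.h>

int main(void)
{
        long ntv_offset = 2500000;      /* residual kernel offset, ns with STA_NANO: 2.5 ms */
        long ntv_freq   = 3276800;      /* kernel frequency, ppm with 16-bit fraction: 50 ppm */

        double clock_offset = ntv_offset / 1e9;   /* -> 0.0025 s */
        double drift_comp   = ntv_freq / 65536e6; /* -> 50e-6 s/s */

        double fp_offset = 0.010;       /* measured offset: 10 ms */
        unsigned long mu = 900;         /* measurement interval: 900 s */

        /* the expression direct_freq() returns */
        double clock_frequency = (fp_offset - clock_offset) / mu + drift_comp;

        /* what the patched code writes to ntv.freq (drift_comp not added again) */
        long ntv_freq_new = (long)(clock_frequency * 65536e6);

        printf("clock_frequency = %.3f ppm\n", clock_frequency * 1e6); /* ~58.333 ppm */
        printf("new ntv.freq    = %ld\n", ntv_freq_new);               /* ~3822933 */
        return 0;
}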