@@ -1,5 +1,5 @@
# sage.doctest: needs numpy sage.modules
- """
+ r"""
Hidden Markov Models

This is a complete pure-Cython optimized implementation of Hidden
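The substantive change throughout this patch is switching plain docstring delimiters to raw-string delimiters (r"""). A raw docstring keeps backslashes literal, which matters once a docstring contains backslash escapes such as LaTeX markup; a minimal illustration of the difference (an assumed example, not taken from the patch):

    # In a plain string the escape "\t" collapses to a tab character ...
    plain = """scaled quantity gamma_t(j) with \theta"""
    # ... while a raw string keeps the backslash sequence intact.
    raw = r"""scaled quantity gamma_t(j) with \theta"""
    assert "\\theta" not in plain
    assert "\\theta" in raw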
@@ -48,11 +48,11 @@ cdef HMM_Util util = HMM_Util()
###########################################

cdef class HiddenMarkovModel:
- """
+ r"""
Abstract base class for all Hidden Markov Models.
"""
def initial_probabilities(self):
- """
+ r"""
Return the initial probabilities as a :class:`TimeSeries` of
length `N`, where `N` is the number of states of the Markov model.

@@ -90,7 +90,7 @@ cdef class HiddenMarkovModel:
return TimeSeries(self.pi)

def transition_matrix(self):
- """
+ r"""
Return the state transition matrix.

OUTPUT: a Sage matrix with real double precision (RDF) entries.
@@ -133,15 +133,15 @@ cdef class HiddenMarkovModel:
return matrix(RDF, self.N, self.A.list())

def graph(self, eps=1e-3):
- """
+ r"""
Create a weighted directed graph from the transition matrix,
not including any edge with a probability less than ``eps``.

INPUT:

- - eps -- nonnegative real number
+ - ``eps`` -- nonnegative real number

- OUTPUT: a digraph
+ OUTPUT: a :class:`DiGraph`

EXAMPLES::

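The ``graph`` method documented in the hunk above can be exercised with the discrete model used in this patch's own doctests; a sketch (outputs omitted, not verified here):

    sage: m = hmm.DiscreteHiddenMarkovModel([[0.4,0.6],[0.1,0.9]],
    ....:                                    [[0.0,1.0],[0.5,0.5]], [.5,.5])
    sage: G = m.graph()        # weighted DiGraph built from transition_matrix()
    sage: G.edges(sort=True)   # edges whose transition probability is at least eps (default 1e-3)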
@@ -165,7 +165,7 @@ cdef class HiddenMarkovModel:
return DiGraph(m, weighted=True)

def sample(self, Py_ssize_t length, number=None, starting_state=None):
- """
+ r"""
Return number samples from this HMM of given length.

INPUT:
@@ -225,7 +225,7 @@ cdef class HiddenMarkovModel:
# HMM algorithms.
#########################################################
cdef TimeSeries _baum_welch_gamma(self, TimeSeries alpha, TimeSeries beta):
- """
+ r"""
Used internally to compute the scaled quantity gamma_t(j)
appearing in the Baum-Welch reestimation algorithm.

@@ -256,7 +256,7 @@ cdef class HiddenMarkovModel:


cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
- """
+ r"""
A discrete Hidden Markov model implemented using double precision
floating point arithmetic.

@@ -328,7 +328,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
cdef object _emission_symbols, _emission_symbols_dict

def __init__(self, A, B, pi, emission_symbols=None, bint normalize=True):
- """
+ r"""
Create a discrete emissions HMM with transition probability
matrix A, emission probabilities given by B, initial state
probabilities pi, and given emission symbols (which default
@@ -371,7 +371,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
util.normalize_probability_TimeSeries(self.B, i*self.n_out, (i+1)*self.n_out)

def __reduce__(self):
- """
+ r"""
Used in pickling.

EXAMPLES::
@@ -384,7 +384,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
(self.A, self.B, self.pi, self.n_out, self._emission_symbols, self._emission_symbols_dict)

def __richcmp__(self, other, op):
- """
+ r"""
EXAMPLES::

sage: m = hmm.DiscreteHiddenMarkovModel([[0.4,0.6],[0.1,0.9]], [[0.0,1.0],[0.5,0.5]], [.5,.5])
@@ -404,7 +404,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
other.__reduce__()[1], op)

def emission_matrix(self):
- """
+ r"""
Return the matrix whose `i`-th row specifies the emission
probability distribution for the `i`-th state.

@@ -456,12 +456,12 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return s

def _emission_symbols_to_IntList(self, obs):
- """
+ r"""
Internal function used to convert a list of emission symbols to an :class:`IntList`.

INPUT:

- - obs -- a list of objects
+ - ``obs`` -- a list of objects

OUTPUT: an IntList

@@ -475,12 +475,12 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return IntList([d[x] for x in obs])

def _IntList_to_emission_symbols(self, obs):
- """
+ r"""
Internal function used to convert a list of emission symbols to an IntList.

INPUT:

- - obs -- a list of objects
+ - ``obs`` -- a list of objects

OUTPUT: an IntList

@@ -494,7 +494,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return [d[x] for x in obs]

def log_likelihood(self, obs, bint scale=True):
- """
+ r"""
Return the logarithm of the probability that this model produced the given
observation sequence. Thus the output is a non-positive number.
@@ -549,7 +549,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return self._forward(obs)

def _forward(self, IntList obs):
- """
+ r"""
Memory-efficient implementation of the forward algorithm, without scaling.

INPUT:
@@ -558,7 +558,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):

OUTPUT:

- - ``float`` -- the log of the probability that the model produced this sequence
+ ``float`` -- the log of the probability that the model produced this sequence

EXAMPLES::

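The forward algorithm summarised above is what the public ``log_likelihood`` method dispatches to; a usage sketch with the model from this patch's doctests (outputs omitted, values not verified here):

    sage: m = hmm.DiscreteHiddenMarkovModel([[0.4,0.6],[0.1,0.9]],
    ....:                                    [[0.0,1.0],[0.5,0.5]], [.5,.5])
    sage: m.log_likelihood([0, 1, 0, 1])                 # scaled forward algorithm (default)
    sage: m.log_likelihood([0, 1, 0, 1], scale=False)    # unscaled variant, fine for short sequences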
@@ -604,7 +604,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return log(alpha.sum())

def _forward_scale(self, IntList obs):
- """
+ r"""
Memory-efficient implementation of the forward algorithm, with scaling.

INPUT:
@@ -613,7 +613,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):

OUTPUT:

- - ``float`` -- the log of the probability that the model produced this sequence
+ ``float`` -- the log of the probability that the model produced this sequence

EXAMPLES::

@@ -693,7 +693,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return log_probability

def generate_sequence(self, Py_ssize_t length, starting_state=None):
- """
+ r"""
Return a sample of the given length from this HMM.

INPUT:
@@ -704,7 +704,6 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
instead of the initial probabilities to determine the
starting state.

-
OUTPUT:

- an :class:`IntList` or list of emission symbols
@@ -812,7 +811,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return self._IntList_to_emission_symbols(obs), states

cdef int _gen_symbol(self, int q, double r):
- """
+ r"""
Generate a symbol in state q using the randomly chosen
floating point number r, which should be between 0 and 1.

@@ -823,7 +822,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):

OUTPUT:

- - a nonnegative int
+ a nonnegative int

EXAMPLES::

@@ -848,7 +847,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return self.n_out - 1

def viterbi(self, obs, log_scale=True):
- """
+ r"""
Determine "the" hidden sequence of states that is most likely
to produce the given sequence seq of observations, along with
the probability that this hidden sequence actually produced
@@ -899,7 +898,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return self._viterbi(obs)

cpdef _viterbi(self, IntList obs):
- """
+ r"""
Used internally to compute the viterbi path, without
rescaling. This can be useful for short sequences.

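A usage sketch of the public ``viterbi`` entry point documented above, whose return value is as described in its docstring (outputs omitted, not verified here):

    sage: m = hmm.DiscreteHiddenMarkovModel([[0.4,0.6],[0.1,0.9]],
    ....:                                    [[0.0,1.0],[0.5,0.5]], [.5,.5])
    sage: m.viterbi([1, 0, 1, 1])                    # log-scaled dynamic programming (default)
    sage: m.viterbi([1, 0, 1, 1], log_scale=False)   # unscaled variant for short sequences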
@@ -979,12 +978,12 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):


cpdef _viterbi_scale(self, IntList obs):
- """
+ r"""
Used internally to compute the viterbi path with rescaling.

INPUT:

- - obs -- IntList
+ - ``obs`` -- IntList

OUTPUT:

@@ -1110,7 +1109,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return beta

cdef _forward_scale_all(self, IntList obs):
- """
+ r"""
Return scaled values alpha_t(i), the sequence of scalings, and
the log probability.

@@ -1171,7 +1170,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return alpha, scale, log_probability

cdef TimeSeries _baum_welch_xi(self, TimeSeries alpha, TimeSeries beta, IntList obs):
- """
+ r"""
Used internally to compute the scaled quantity xi_t(i,j)
appearing in the Baum-Welch reestimation algorithm.

@@ -1204,7 +1203,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):
return xi

def baum_welch(self, obs, int max_iter=100, double log_likelihood_cutoff=1e-4, bint fix_emissions=False):
- """
+ r"""
Given an observation sequence obs, improve this HMM using the
Baum-Welch algorithm to increase the probability of observing obs.
@@ -1225,8 +1224,8 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):

OUTPUT:

- - changes the model in places, and returns the log
- likelihood and number of iterations.
+ changes the model in place, and returns the log
+ likelihood and number of iterations.

EXAMPLES::

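To tie the methods in this hunk together, a training sketch built only from calls that appear in this patch (parameter values are illustrative; outputs omitted and not verified here):

    sage: m = hmm.DiscreteHiddenMarkovModel([[0.4,0.6],[0.1,0.9]],
    ....:                                    [[0.0,1.0],[0.5,0.5]], [.5,.5])
    sage: obs, states = m.generate_sequence(100)   # sample an observation sequence
    sage: m.baum_welch(obs, max_iter=100, log_likelihood_cutoff=1e-4)   # updates A, B, pi in place
    sage: m.log_likelihood(obs)                    # log likelihood under the reestimated model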
@@ -1355,7 +1354,7 @@ cdef class DiscreteHiddenMarkovModel(HiddenMarkovModel):

# Keep this -- it's for backwards compatibility with the GHMM based implementation
def unpickle_discrete_hmm_v0(A, B, pi, emission_symbols, name):
- """
+ r"""
TESTS::

sage: m = hmm.DiscreteHiddenMarkovModel([[0.4,0.6],[0.1,0.9]], [[0.0,1.0],[0.5,0.5]], [1,0])