Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# -*- coding: utf-8 -*- 

2from ._core import math, linalg 

3import numpy as np 

4 

5 

def solve(
    design_matrix,
    data,
    *,
    C=None,
    cho_C=None,
    mu=0.0,
    L=None,
    cho_L=None,
    N=None,
):
    """
    Solve the generalized least squares (GLS) problem.

    Args:
        design_matrix (matrix): The design matrix that transforms a vector
            from coefficient space to data space.
        data (vector): The observed dataset.
        C (scalar, vector, or matrix): The data covariance. This may be
            a scalar, in which case the noise is assumed to be
            homoscedastic, a vector, in which case the covariance
            is assumed to be diagonal, or a matrix specifying the full
            covariance of the dataset. Default is None. Either `C` or
            `cho_C` must be provided.
        cho_C (matrix): The lower Cholesky factorization of the data
            covariance matrix. Defaults to None. Either `C` or
            `cho_C` must be provided.
        mu (scalar or vector): The prior mean on the regression coefficients.
            Default is zero.
        L (scalar, vector, or matrix): The prior covariance. This may be
            a scalar, in which case the covariance is assumed to be
            homoscedastic, a vector, in which case the covariance
            is assumed to be diagonal, or a matrix specifying the full
            prior covariance. Default is None. Either `L` or
            `cho_L` must be provided.
        cho_L (matrix): The lower Cholesky factorization of the prior
            covariance matrix. Defaults to None. Either `L` or
            `cho_L` must be provided.
        N (int, optional): The number of regression coefficients. This is
            necessary only if both ``mu`` and ``L`` are provided as scalars.

    Returns:
        A tuple containing the posterior mean for the regression \
        coefficients (a vector) and the Cholesky factorization \
        of the posterior covariance (a lower triangular matrix).

    """
    design_matrix = math.cast(design_matrix)
    data = math.cast(data)
    # Wrap the data covariance; the number of data points fixes its size.
    C = linalg.Covariance(C, cho_C, N=data.shape[0])
    mu = math.cast(mu)
    if L is None and cho_L is None:
        raise ValueError(
            "Either the prior covariance or its "
            "Cholesky factorization must be provided."
        )
    elif L is not None:
        tmp = math.cast(L)
    else:
        tmp = math.cast(cho_L)
    # Infer the number of coefficients ``N`` from whichever input carries it.
    if mu.ndim == 0 and tmp.ndim == 0:
        assert (
            N is not None
        ), "Please provide the number of coefficients ``N``."
    elif mu.ndim > 0:
        N = mu.shape[0]
    elif tmp.ndim > 0:
        # BUGFIX: previously tested ``L.ndim`` here, which raises
        # AttributeError when only ``cho_L`` is given (``L`` is None) or
        # when ``L`` is a bare Python scalar; ``tmp`` is the cast version
        # of whichever prior input was supplied.
        N = tmp.shape[0]
    if mu.ndim == 0:
        # Promote a scalar prior mean to a full-length vector.
        mu = mu * math.ones(N)
    L = linalg.Covariance(L, cho_L, N=N)
    return linalg.solve(design_matrix, data, C.cholesky, mu, L.inverse)

78 

79 

def lnlike(
    design_matrix,
    data,
    *,
    C=None,
    cho_C=None,
    mu=0.0,
    L=None,
    cho_L=None,
    N=None,
    woodbury=True,
):
    """
    Compute the log marginal likelihood of the data given a design matrix.

    Args:
        design_matrix (matrix): The design matrix that transforms a vector
            from coefficient space to data space.
        data (vector): The observed dataset.
        C (scalar, vector, or matrix): The data covariance. This may be
            a scalar, in which case the noise is assumed to be
            homoscedastic, a vector, in which case the covariance
            is assumed to be diagonal, or a matrix specifying the full
            covariance of the dataset. Default is None. Either `C` or
            `cho_C` must be provided.
        cho_C (matrix): The lower Cholesky factorization of the data
            covariance matrix. Defaults to None. Either `C` or
            `cho_C` must be provided.
        mu (scalar or vector): The prior mean on the regression coefficients.
            Default is zero.
        L (scalar, vector, or matrix): The prior covariance. This may be
            a scalar, in which case the covariance is assumed to be
            homoscedastic, a vector, in which case the covariance
            is assumed to be diagonal, or a matrix specifying the full
            prior covariance. Default is None. Either `L` or
            `cho_L` must be provided.
        cho_L (matrix): The lower Cholesky factorization of the prior
            covariance matrix. Defaults to None. Either `L` or
            `cho_L` must be provided.
        N (int, optional): The number of regression coefficients. This is
            necessary only if both ``mu`` and ``L`` are provided as scalars.
        woodbury (bool, optional): Solve the linear problem using the
            Woodbury identity? Default is True. The
            `Woodbury identity <https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_
            is used to speed up matrix operations in the case that the
            number of data points is much larger than the number of
            regression coefficients. In this limit, it can
            speed up the code by more than an order of magnitude. Keep
            in mind that the numerical stability of the Woodbury identity
            is not great, so if you're getting strange results try
            disabling this. It's also a good idea to disable this in the
            limit of few data points and large number of regressors.

    Returns:
        The log marginal likelihood, a scalar.

    """
    design_matrix = math.cast(design_matrix)
    data = math.cast(data)
    # Wrap the data covariance; the number of data points fixes its size.
    C = linalg.Covariance(C, cho_C, N=data.shape[0])
    mu = math.cast(mu)
    if L is None and cho_L is None:
        raise ValueError(
            "Either the prior covariance or its "
            "Cholesky factorization must be provided."
        )
    elif L is not None:
        tmp = math.cast(L)
    else:
        tmp = math.cast(cho_L)
    # Infer the number of coefficients ``N`` from whichever input carries it.
    if mu.ndim == 0 and tmp.ndim == 0:
        assert (
            N is not None
        ), "Please provide the number of coefficients ``N``."
    elif mu.ndim > 0:
        N = mu.shape[0]
    elif tmp.ndim > 0:
        # BUGFIX: previously tested ``L.ndim`` here, which raises
        # AttributeError when only ``cho_L`` is given (``L`` is None) or
        # when ``L`` is a bare Python scalar; ``tmp`` is the cast version
        # of whichever prior input was supplied.
        N = tmp.shape[0]
    if mu.ndim == 0:
        # Promote a scalar prior mean to a full-length vector.
        mu = mu * math.ones(N)
    L = linalg.Covariance(L, cho_L, N=N)
    if woodbury:
        # Fast path: evaluate via the Woodbury identity using the inverse
        # covariances and pre-computed log-determinants.
        return linalg.lnlike_woodbury(
            design_matrix, data, C.inverse, mu, L.inverse, C.lndet, L.lndet
        )
    else:
        return linalg.lnlike(design_matrix, data, C.value, mu, L.value)