@article{Bai2009,
author = {Bai, Jushan and Ng, Serena},
doi = {10.1002/jae.1063},
file = {:E$\backslash$:/Dropbox/phd/shrinkage/Bai\_et\_al-2009-Journal\_of\_Applied\_Econometrics.pdf:pdf},
issn = {08837252},
journal = {Journal of Applied Econometrics},
month = jun,
number = {4},
pages = {607--629},
title = {{Boosting diffusion indices}},
url = {http://doi.wiley.com/10.1002/jae.1063},
volume = {24},
year = {2009}
}
@article{Banbura2013,
author = {Ba\'{n}bura, Marta and Giannone, Domenico and Modugno, Michele and Reichlin, Lucrezia},
file = {:E$\backslash$:/Dropbox/phd/nowcast/Now-casting and the real-time data flow.pdf:pdf},
keywords = {Macroeconomic news,macroeconomic forecasting,real-time data flow},
number = {15},
title = {{Now-Casting and the Real-Time Data Flow}},
year = {2013}
}
@article{Bates2013,
abstract = {This paper considers the estimation of approximate dynamic factor models when there is temporal instability in the factor loadings. We characterize the type and magnitude of instabilities under which the principal components estimator of the factors is consistent and find that these instabilities can be larger than earlier theoretical calculations suggest. We also discuss implications of our results for the robustness of regressions based on the estimated factors and of estimates of the number of factors in the presence of parameter instability. Simulations calibrated to an empirical application indicate that instability in the factor loadings has a limited impact on estimation of the factor space and diffusion index forecasting, whereas estimation of the number of factors is more substantially affected. © 2013 Elsevier B.V. All rights reserved.},
author = {Bates, Brandon J. and Plagborg-M\o ller, Mikkel and Stock, James H. and Watson, Mark W.},
doi = {10.1016/j.jeconom.2013.04.014},
isbn = {03044076},
issn = {03044076},
journal = {Journal of Econometrics},
number = {2},
pages = {289--304},
title = {{Consistent factor estimation in dynamic factor models with structural instability}},
volume = {177},
year = {2013}
}
@article{Baumeister2014,
author = {Baumeister, Christiane and Gu\'{e}rin, Pierre and Kilian, Lutz},
doi = {10.1016/j.ijforecast.2014.06.005},
issn = {0169-2070},
keywords = {Econometric and statistical methods,International topics},
title = {{Do High-Frequency Financial Data Help Forecast Oil Prices? The MIDAS Touch at Work}},
year = {2014}
}
@article{Castle2014,
abstract = {We investigate alternative robust approaches to forecasting, using a new class of robust devices, contrasted with equilibrium correction models. Their forecasting properties are derived facing a range of likely empirical problems at the forecast origin, including measurement errors, impulses, omitted variables, unanticipated location shifts and incorrectly included variables that experience a shift. We derive the resulting forecast biases and error variances, and indicate when the methods are likely to perform well. The robust methods are applied to forecasting US GDP using autoregressive models, and also to autoregressive models with factors extracted from a large dataset of macroeconomic variables. We consider forecasting performance over the Great Recession, and over an earlier more quiescent period.},
author = {Castle, Jennifer and Hendry, David and Clements, Michael P.},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Castle, Hendry, Clements - 2014 - Robust Approaches to Forecasting.pdf:pdf},
journal = {Economics Series Working Papers},
keywords = {Factor models,GDP forecasts,Location shifts,Robust forecasts,Smoothed Forecasting devices},
month = jan,
publisher = {University of Oxford, Department of Economics},
title = {{Robust Approaches to Forecasting}},
url = {http://ideas.repec.org/p/oxf/wpaper/697.html},
year = {2014}
}
@book{Duarte2014,
author = {Duarte, Cl\'{a}udia},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/AR-MIDAS/Autoregressive augmentation of MIDAS regressions.pdf:pdf},
title = {{Autoregressive Augmentation of {MIDAS} Regressions}},
year = {2014}
}
@article{Hassani2015,
author = {Hassani, Hossein and Silva, Emmanuel Sirimal},
doi = {10.1007/s40745-015-0029-9},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Hassani, Silva - 2015 - Forecasting with Big Data A Review.pdf:pdf},
issn = {2198-5804},
journal = {Annals of Data Science},
month = apr,
title = {{Forecasting with Big Data: A Review}},
url = {http://link.springer.com/10.1007/s40745-015-0029-9},
year = {2015}
}
@article{Hyndman2002a,
abstract = {We provide a new approach to automatic forecasting based on an extended range of exponential smoothing methods. Each method in our taxonomy of exponential smoothing methods provides forecasts that are equivalent to forecasts from a state space model. This equivalence allows: (1) easy calculation of the likelihood, the AIC and other model selection criteria; (2) computation of prediction intervals for each method; and (3) random simulation from the underlying state space model. We demonstrate the methods by applying them to the data from the M-competition and the M3-competition. The method provides forecast accuracy comparable to the best methods in the competitions; it is particularly good for short forecast horizons with seasonal data. ?? 2002 International Institute of Forecasters. Published by Elsevier Science B.V. All rights reserved.},
author = {Hyndman, Rob J. and Koehler, Anne B. and Snyder, Ralph D. and Grose, Simone},
doi = {10.1016/S0169-2070(01)00110-8},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Hyndman et al. - 2002 - A state space framework for automatic forecasting using exponential smoothing methods.pdf:pdf},
isbn = {0169-2070},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Automatic forecasting,Exponential smoothing,Prediction intervals,State space models},
title = {{A state space framework for automatic forecasting using exponential smoothing methods}},
year = {2002}
}
@article{Kuzin2011a,
abstract = {This paper compares the mixed-data sampling (MIDAS) and mixed-frequency VAR (MF-VAR) approaches to model specification in the presence of mixed-frequency data, e.g. monthly and quarterly series. MIDAS leads to parsimonious models which are based on exponential lag polynomials for the coefficients, whereas MF-VAR does not restrict the dynamics and can therefore suffer from the curse of dimensionality. However, if the restrictions imposed by MIDAS are too stringent, the MF-VAR can perform better. Hence, it is difficult to rank MIDAS and MF-VAR a priori, and their relative rankings are better evaluated empirically. In this paper, we compare their performances in a case which is relevant for policy making, namely nowcasting and forecasting quarterly GDP growth in the euro area on a monthly basis, using a set of about 20 monthly indicators. It turns out that the two approaches are more complements than substitutes, since MIDAS tends to perform better for horizons up to four to five months, whereas MF-VAR performs better for longer horizons, up to nine months. ?? 2010 International Institute of Forecasters.},
author = {Kuzin, Vladimir and Marcellino, Massimiliano and Schumacher, Christian},
doi = {10.1016/j.ijforecast.2010.02.006},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/MIDAS vs. mixed-frequency VAR.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {MIDAS,Mixed-frequency VAR,Mixed-frequency data,Nowcasting},
number = {2},
pages = {529--542},
publisher = {Elsevier B.V.},
title = {{MIDAS vs. mixed-frequency VAR: Nowcasting GDP in the euro area}},
url = {http://dx.doi.org/10.1016/j.ijforecast.2010.02.006},
volume = {27},
year = {2011}
}
@article{Kvedaras2012,
abstract = {We propose a test for the evaluation of statistical acceptability of a functional constraint which is imposed on parameters in the mixed data sampling regressions. The asymptotic behavior of the test statistic is characterized and a few other extensions are discussed. {\copyright} 2012 Elsevier B.V.},
author = {Kvedaras, Virmantas and Zemlys, Vaidotas},
doi = {10.1016/j.econlet.2012.03.009},
issn = {01651765},
journal = {Economics Letters},
keywords = {Constraint,MIDAS,Restriction,Specification,Test},
number = {2},
pages = {250--254},
publisher = {Elsevier B.V.},
title = {{Testing the functional constraints on parameters in regressions with variables of different frequency}},
url = {http://dx.doi.org/10.1016/j.econlet.2012.03.009},
volume = {116},
year = {2012}
}
@article{Mitchell2014,
author = {Mitchell, James},
doi = {10.1016/j.ijforecast.2013.11.002},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Mitchell - 2014 - Discussion of “Forecasting macroeconomic variables using collapsed dynamic factor analysis” by Falk Br\"{a}uning and Sie.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
month = jul,
number = {3},
pages = {585--588},
title = {{Discussion of ``Forecasting macroeconomic variables using collapsed dynamic factor analysis'' by Falk Br\"{a}uning and Siem Jan Koopman}},
url = {http://www.sciencedirect.com/science/article/pii/S0169207013001611},
volume = {30},
year = {2014}
}
@article{Ng2013,
abstract = {The object of this paper is to produce non-parametric maximum likelihood estimates of forecast distributions in a general non-Gaussian, non-linear state space setting. The transition densities that define the evolution of the dynamic state process are represented in parametric form, but the conditional distribution of the non-Gaussian variable is estimated non-parametrically. The filtered and prediction distributions are estimated via a computationally efficient algorithm that exploits the functional relationship between the observed variable, the state variable and a measurement error with an invariant distribution. Simulation experiments are used to document the accuracy of the non-parametric method relative to both correctly and incorrectly specified parametric alternatives. In an empirical illustration, the method is used to produce sequential estimates of the forecast distribution of realized volatility on the S\&P500 stock index during the recent financial crisis. A resampling technique for measuring sampling variation in the estimated forecast distributions is also demonstrated.},
author = {Ng, Jason and Forbes, Catherine S. and Martin, Gael M. and McCabe, Brendan P.M.},
doi = {10.1016/j.ijforecast.2012.10.005},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Ng et al. - 2013 - Non-parametric estimation of forecast distributions in non-Gaussian, non-linear state space models.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Grid-based filtering,Non-Gaussian time series,Penalized likelihood,Probabilistic forecasting,Realized volatility,Subsampling},
month = jul,
number = {3},
pages = {411--430},
title = {{Non-parametric estimation of forecast distributions in non-Gaussian, non-linear state space models}},
url = {http://www.sciencedirect.com/science/article/pii/S0169207012001665},
volume = {29},
year = {2013}
}
@article{West2013,
abstract = {Since the 1970s, applications of Bayesian time series models and forecasting methods have represented major success stories for our discipline. Dynamic modelling is a very broad field, so this ISBA Lecture on Bayesian Foundations will rather selectively note key concepts and some core model contexts, leavened with extracts from a few time series analysis and forecasting examples from various application fields. The Lecture will then link into and briefly discuss a range of recent developments in exciting and challenging areas of Bayesian time series analysis.},
author = {West, Mike},
doi = {10.1093/acprof:oso/9780199695607.003.0008},
file = {:E$\backslash$:/Dropbox/phd/statespace/BAYESIAN DYNAMIC MODELLING.pdf:pdf},
isbn = {9780199695607},
journal = {Bayesian Inference and Markov Chain Monte Carlo: In \ldots},
title = {{Bayesian dynamic modelling}},
url = {http://books.google.com/books?hl=en\&lr=\&id=rpuo2eC6-EsC\&oi=fnd\&pg=PA145\&dq=Bayesian+dynamic+modelling\&ots=tmZ0q4kXbx\&sig=MQVb9RSpunR0GoJ7XEWrDvgZuyY},
year = {2013}
}
@article{Alessi2014,
author = {Alessi, Lucia},
file = {:E$\backslash$:/Dropbox/phd/forecast/Central Bank Macroeconomic Forecasting.pdf:pdf},
number = {680},
title = {{Central Bank Macroeconomic Forecasting during the Global Financial Crisis: The European Central Bank and Federal Reserve Bank of New York Experiences}},
year = {2014}
}
@article{Doz2006a,
abstract = {This paper considers quasi-maximum likelihood estimations of a dynamic ap- proximate factor model when the panel of time series is large. Maximum likelihood is analyzed under different sources of misspecification: omitted serial correlation of the observations and cross-sectional correlation of the idiosyncratic components. It is shown that the effects ofmisspecification on the estimation of the common factors is negligible for large sample size (T) and the cross-sectional dimension (n). The estimator is feasible when n is large and easily implementable using the Kalman smoother and theEMalgorithm as in traditional factor analysis. Simulation results illustrate what are the empirical conditions in which we can expect improvement with respect to simple principle components considered by Bai (2003), Bai and Ng (2002), Forni, Hallin, Lippi, and Reichlin (2000, 2005b), Stock and Watson (2002a,b).},
author = {Doz, Catherine and Giannone, Domenico and Reichlin, Lucrezia},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/DYNAMIC FACTORecbwp674.pdf:pdf},
keywords = {Factor Model,Quasi Maximum Likelihood.,large cross-sections},
number = {674},
title = {{A quasi maximum likelihood approach for large approximate dynamic factor models}},
year = {2006}
}
@article{Foroni2014,
abstract = {In this paper, we focus on the different methods which have been proposed in the literature to date for dealing with mixed-frequency and ragged-edge datasets: bridge equations, mixed-data sampling (MIDAS), and mixed-frequency VAR (MF-VAR) models. We discuss their performances for nowcasting the quarterly growth rate of the Euro area GDP and its components, using a very large set of monthly indicators. We investigate the behaviors of single indicator models, forecast combinations and factor models, in a pseudo real-time framework. MIDAS with an AR component performs quite well, and outperforms MF-VAR at most horizons. Bridge equations perform well overall. Forecast pooling is superior to most of the single indicator models overall. Pooling information using factor models gives even better results. The best results are obtained for the components for which more economically related monthly indicators are available. Nowcasts of GDP components can then be combined to obtain nowcasts for the total GDP growth.},
author = {Foroni, Claudia and Marcellino, Massimiliano},
doi = {10.1016/j.ijforecast.2013.01.010},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Bridge models,Factor models,MIDAS,Mixed-frequency VAR,Mixed-frequency data,Nowcasting},
month = jul,
number = {3},
pages = {554--568},
title = {{A comparison of mixed frequency approaches for nowcasting Euro area macroeconomic aggregates}},
url = {http://www.sciencedirect.com/science/article/pii/S016920701300040X},
volume = {30},
year = {2014}
}
@article{Giannone2008,
abstract = {A formal method is developed for evaluating the marginal impact that intra-monthly data releases have on current-quarter forecasts (nowcasts) of real gross domestic product (GDP) growth. The method can track the real-time flow of the type of information monitored by central banks because it can handle large data sets with staggered data-release dates. Each time new data are released, the nowcasts are updated on the basis of progressively larger data sets that, reflecting the unsynchronized data-release dates, have a “jagged edge” across the most recent months.},
author = {Giannone, Domenico and Reichlin, Lucrezia and Small, David},
doi = {10.1016/j.jmoneco.2008.05.010},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Giannone, Reichlin, Small - 2008 - Nowcasting The real-time informational content of macroeconomic data.pdf:pdf},
issn = {03043932},
journal = {Journal of Monetary Economics},
keywords = {C33,C53,E52,Factor model,Forecasting,Monetary policy,Nowcast,Real-time data},
month = may,
number = {4},
pages = {665--676},
title = {{Nowcasting: The real-time informational content of macroeconomic data}},
url = {http://www.sciencedirect.com/science/article/pii/S0304393208000652},
volume = {55},
year = {2008}
}
@article{Marcellino2006,
abstract = {“Iterated” multiperiod-ahead time series forecasts are made using a one-period ahead model, iterated forward for the desired number of periods, whereas “direct” forecasts are made using a horizon-specific estimated model, where the dependent variable is the multiperiod ahead value being forecasted. Which approach is better is an empirical matter: in theory, iterated forecasts are more efficient if the one-period ahead model is correctly specified, but direct forecasts are more robust to model misspecification. This paper compares empirical iterated and direct forecasts from linear univariate and bivariate models by applying simulated out-of-sample methods to 170 U.S. monthly macroeconomic time series spanning 1959–2002. The iterated forecasts typically outperform the direct forecasts, particularly, if the models can select long-lag specifications. The relative performance of the iterated forecasts improves with the forecast horizon.},
author = {Marcellino, Massimiliano and Stock, James H. and Watson, Mark W.},
doi = {10.1016/j.jeconom.2005.07.020},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Marcellino, Stock, Watson - 2006 - A comparison of direct and iterated multistep AR methods for forecasting macroeconomic time series.pdf:pdf},
issn = {03044076},
journal = {Journal of Econometrics},
keywords = {C32,E37,E47,Forecast comparisons,Multistep forecasts,Var forecasts},
month = nov,
number = {1-2},
pages = {499--526},
title = {{A comparison of direct and iterated multistep AR methods for forecasting macroeconomic time series}},
url = {http://www.sciencedirect.com/science/article/pii/S030440760500165X},
volume = {135},
year = {2006}
}
@book{Mikosch2015,
author = {Mikosch, Heiner and Neuwirth, Stefan},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/VAR/Real-Time Forecasting with a MIDAS VAR.pdf:pdf},
isbn = {9789523230422},
keywords = {JEL classifications: C53, E27; forecasting},
title = {{Real-Time Forecasting with a {MIDAS} {VAR}}},
year = {2015}
}
@article{Stock2005,
abstract = {This paper considers VAR models incorporating many time series that interact through a few dynamic factors. Several econometric issues are addressed including estimation of the number of dynamic factors and tests for the factor restrictions imposed on the VAR. Structural VAR identification based on timing restrictions, long run restrictions, and restrictions on factor loadings are discussed and practical computational methods suggested. Empirical analysis using U.S. data suggest several (7) dynamic factors, rejection of the exact dynamic factor model but support for an approximate factor model, and sensible results for a SVAR that identifies money policy shocks using timing restrictions.},
author = {Stock, James H and Watson, M W},
journal = {NBER working paper},
number = {June},
title = {{Implications of Dynamic Factor Models for {VAR} Analysis}},
url = {http://papers.ssrn.com/sol3/papers.cfm?abstract\_id=755703},
year = {2005}
}
@article{Banbura2010,
abstract = {We define nowcasting as the prediction of the present, the very near future and the very recent past. Crucial in this process is to use timely monthly information in order to nowcast key economic variables, such as e.g. GDP, that are typically collected at low frequency and published with long delays. Until recently, nowcasting had received very little attention by the academic literature, although it was routinely conducted in policy institutions either through a judgemental process or on the basis of simple models. We argue that the nowcasting process goes beyond the simple production of an early estimate as it essentially requires the assessment of the impact of new data on the subsequent forecast revisions for the target variable. We design a statistical model which produces a sequence of nowcasts in relation to the real time releases of various economic data. The methodology allows to process a large amount of information, as it is traditionally done by practitioners using judgement, but it does it in a fully automatic way. In particular, it provides an explicit link between the news in consecutive data releases and the resulting forecast revisions. To illustrate our ideas, we study the nowcast of euro area GDP in the fourth quarter of 2008.},
author = {Banbura, Marta and Giannone, Domenico and Reichlin, Lucrezia},
title = {{Nowcasting}},
year = {2010}
}
@article{Castle2013a,
abstract = {We consider the reasons for nowcasting, how nowcasts can be achieved, and the use and timing of information. The existence of contemporaneous data such as surveys is a major difference from forecasting, but many of the recent lessons about forecasting remain relevant. Given the extensive disaggregation over variables underlying flash estimates of aggregates, we show that automatic model selection can play a valuable role, especially when location shifts would otherwise induce nowcast failure. Thus, we address nowcasting when location shifts occur, probably with measurement error. We describe impulse-indicator saturation as a potential solution to such shifts, noting its relation to intercept corrections and to robust methods to avoid systematic nowcast failure. We propose a nowcasting strategy, building models of all disaggregate series by automatic methods, forecasting all variables before the end of each period, testing for shifts as available measures arrive, and adjusting forecasts of cognate missing series if substantive discrepancies are found. An alternative is switching to robust forecasts when breaks are detected. We apply a variant of this strategy to nowcast UK GDP growth, seeking pseudo real-time data availability.},
author = {Castle, Jennifer and Hendry, David},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Castle, Hendry - 2013 - Forecasting and Nowcasting Macroeconomic Variables A Methodological Overview.pdf:pdf},
journal = {Economics Series Working Papers},
keywords = {Autometrics,Contemporaneous information,Forecasting,Impulse-indicator saturation,Location shifts,Nowcasting},
month = sep,
publisher = {University of Oxford, Department of Economics},
title = {{Forecasting and Nowcasting Macroeconomic Variables: A Methodological Overview}},
url = {http://ideas.repec.org/p/oxf/wpaper/674.html},
year = {2013}
}
@article{Harvey2006,
author = {Harvey, Andrew},
doi = {10.1016/S1574-0706(05)01007-4},
file = {:E$\backslash$:/Dropbox/phd/statespace/Forecasting with Unobserved Components Time series.pdf:pdf},
journal = {Handbook of Economic Forecasting},
number = {05},
title = {{Forecasting with Unobserved Components Time-Series Models}},
volume = {1},
year = {2006}
}
@article{Mariano2010,
author = {Mariano, Roberto S. and Murasawa, Yasutomo},
doi = {10.1111/j.1468-0084.2009.00567.x},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/factor/Mariano\_et\_al-2010-Oxford\_Bulletin\_of\_Economics\_and\_Statistics.pdf:pdf},
issn = {03059049},
journal = {Oxford Bulletin of Economics and Statistics},
number = {1},
pages = {27--46},
title = {{A coincident index, common factors, and monthly real GDP}},
volume = {72},
year = {2010}
}
@article{Nowman1998,
author = {Nowman, K.B.},
doi = {10.1023/A:1008682814171},
issn = {1572-9974},
journal = {Computational Economics},
language = {en},
month = dec,
number = {3},
pages = {243--254},
publisher = {Kluwer Academic Publishers},
title = {{Econometric Estimation of a Continuous Time Macroeconomic Model of the United Kingdom with Segmented Trends}},
url = {http://link.springer.com/article/10.1023/A:1008682814171},
volume = {12},
year = {1998}
}
@article{Doz2011,
abstract = {This paper shows consistency of a two-step estimation of the factors in a dynamic approximate factor model when the panel of time series is large (n large). In the first step, the parameters of the model are estimated from an OLS on principal components. In the second step, the factors are estimated via the Kalman smoother. The analysis develops the theory for the estimator considered in Giannone et al. (2004) and Giannone et al. (2008) and for the many empirical papers using this framework for nowcasting.},
author = {Doz, Catherine and Giannone, Domenico and Reichlin, Lucrezia},
doi = {10.1016/j.jeconom.2011.02.012},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Doz, Giannone, Reichlin - 2011 - A two-step estimator for large approximate dynamic factor models based on Kalman filtering.pdf:pdf},
issn = {03044076},
journal = {Journal of Econometrics},
keywords = {C32,C33,C51,Factor models,Kalman filter,Large cross-sections,Principal components},
month = sep,
number = {1},
pages = {188--205},
title = {{A two-step estimator for large approximate dynamic factor models based on Kalman filtering}},
url = {http://www.sciencedirect.com/science/article/pii/S030440761100039X},
volume = {164},
year = {2011}
}
@article{Economics2006,
author = {Nakstad, Yoon Shin},
file = {:E$\backslash$:/Dropbox/phd/statespace/Thesis\_Structural\_time\_series\_model\_FINALFINAL.pdf:pdf},
number = {August},
title = {{Structural Time Series Models: Theory and Application}},
year = {2006}
}
@article{Galvao2013,
abstract = {When assessing the predictive power of financial variables for economic activity, researchers usually aggregate higher-frequency data before estimating a forecasting model that assumes the relationship between the financial variable and the dependent variable to be linear. This paper proposes a model called smooth transition mixed data sampling (STMIDAS) regression, which relaxes both of these assumptions. Simulation exercises indicate that the improvements in forecasting accuracy from the use of mixed data sampling are larger in nonlinear than in linear specifications. When forecasting output growth with financial variables in real time, statistically significant improvements over a linear regression are more likely to arise from forecasting with STMIDAS than with MIDAS regressions.},
author = {Galv\~{a}o, Ana Beatriz},
doi = {10.1016/j.ijforecast.2012.10.006},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Galv\~{a}o - 2013 - Changes in predictive ability with mixed frequency data.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Economic activity,Financial indicators,MIDAS,Predictive ability,Smooth transition},
month = jul,
number = {3},
pages = {395--410},
title = {{Changes in predictive ability with mixed frequency data}},
url = {http://www.sciencedirect.com/science/article/pii/S0169207012001689},
volume = {29},
year = {2013}
}
@article{Harvey1990,
author = {Harvey, A. C. and Peters, S.},
file = {:E$\backslash$:/Dropbox/phd/statespace/Harvey\_Peters1990.pdf:pdf},
journal = {Journal of Forecasting},
number = {June 1988},
pages = {89--108},
title = {{Estimation Procedures for Structural Time Series Models}},
volume = {9},
year = {1990}
}
@article{JennieBai,
author = {Bai, Jennie and Ghysels, Eric and Wright, Jonathan H.},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Jennie Bai - Unknown - State Space Models and MIDAS Regressions ∗.pdf:pdf},
title = {{State Space Models and {MIDAS} Regressions}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.184.4807}
}
@book{Koop2010,
abstract = {Macroeconomic practitioners frequently work with multivariate time series models such as VARs, factor augmented VARs as well as timevarying parameter versions of these models (including variants with multivariate stochastic volatility). These models have a large number of parameters and, thus, over-parameterization problems may arise. Bayesian methods have become increasingly popular as a way of overcoming these problems. In this monograph, we discuss VARs, factor augmented VARs and time-varying parameter extensions and show how Bayesian inference proceeds. Apart from the simplest of VARs, Bayesian inference requires the use of Markov chain Monte Carlo methods developed for state space models and we describe these algorithms. The focus is on the empirical macroeconomist and we offer advice on how to use these models and methods in practice and include empirical illustrations. A website provides Matlab code for carrying out Bayesian inference in these models.},
author = {Koop, Gary and Korobilis, D.},
doi = {10.1561/0800000013},
file = {:E$\backslash$:/Dropbox/phd/bayesian/courses/koop/koop\_korobilis\_Foundations\_and\_Trends\_2010.pdf:pdf},
isbn = {0800000013},
issn = {1551-3076},
keywords = {Economic Theory},
number = {4},
pages = {267--358},
title = {{Bayesian Multivariate Time Series Methods for Empirical Macroeconomics}},
url = {http://strathprints.strath.ac.uk/28122/},
volume = {3},
year = {2010}
}
@article{Marcellino2007b,
author = {Marcellino, Massimiliano and Schumacher, Christian},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/01 slide Factor nowcasting of German GDP.pdf:pdf},
keywords = {authors,business cycle,data,ect the views of,large factor models,midas,missing values,mixed-frequency,not necessarily re,nowcasting,personal opinions and does,the,this paper represents the},
number = {October},
pages = {1--45},
title = {{Factor nowcasting of German GDP with ragged-edge data: A model comparison using MIDAS projections}},
year = {2007}
}
@article{Poncela2004,
author = {Poncela, Pilar},
doi = {10.1016/j.ijforecast.2003.11.005},
isbn = {0198523548 (acid-free paper)},
issn = {01692070},
journal = {International Journal of Forecasting},
number = {1},
pages = {139--141},
title = {{Time series analysis by state space methods}},
volume = {20},
year = {2004}
}
@article{Sells1995,
abstract = {A stochastic dynamic programme determines the farmer's long-term weed control strategy incorporating decisions of crop, autumn cultivations, timing of planting and herbicide treatments. A previous paper (Sells, (1993) Agric. Syst., 41, 41–52) describes the one-step transition probabilities due to variable weed control from year to year. This paper describes the costing of the decision options and shows the results from the model for the optimal control of one difficult grass weed in cereals. The model is shown to give reasonable results and shows that the control strategy which optimises costs is to use half-rate herbicide over a wide range of seedbanks. A simulation of the optimum strategy over 10 years shows that the total overall use of herbicide is also reduced.},
author = {Sells, J. E.},
doi = {10.1016/0308-521X(94)00016-K},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Sells - 1995 - Optimising weed management using stochastic dynamic programming to take account of uncertain herbicide performance.pdf:pdf},
issn = {0308521X},
journal = {Agricultural Systems},
month = jan,
number = {3},
pages = {271--296},
title = {{Optimising weed management using stochastic dynamic programming to take account of uncertain herbicide performance}},
url = {http://www.sciencedirect.com/science/article/pii/0308521X9400016K},
volume = {48},
year = {1995}
}
@article{Stock2012a,
abstract = {This paper provides a simple shrinkage representation that describes the operational characteristics of various forecasting methods designed for a large number of orthogonal predictors (such as principal components). These methods include pretest methods, Bayesian model averaging, empirical Bayes, and bagging. We compare empirically forecasts from these methods to dynamic factor model (DFM) forecasts using a U.S. macroeconomic data set with 143 quarterly variables spanning 1960-2008. For most series, including measures of real economic activity, the shrinkage forecasts are inferior to the DFM forecasts},
author = {Stock, James H. and Watson, Mark W.},
doi = {10.1080/07350015.2012.715956},
file = {:E$\backslash$:/Dropbox/phd/shrinkage/stock\_watson\_generalized\_shrinkage\_June\_2012.pdf:pdf},
issn = {0735-0015},
journal = {Journal of Business and Economic Statistics},
keywords = {dynamic factor models,empirical Bayes,high dimensional model},
number = {4},
pages = {481--493},
title = {{Generalized shrinkage methods for forecasting using many predictors}},
url = {http://amstat.tandfonline.com/doi/abs/10.1080/07350015.2012.715956},
volume = {30},
year = {2012}
}
@misc{JungbackerKoopman2015slides,
author = {Jungbacker, Borus and Koopman, Siem Jan},
keywords = {B Jungbacker \& S J Koopman,Econometrics of Forecasting},
mendeley-tags = {Econometrics of Forecasting},
title = {{Likelihood-Based Dynamic Factor Analysis}},
url = {http://www.fsmevents.com/res/4specialc2/JungbackerKoopmanSlides.pdf},
urldate = {2015-04-28}
}
@article{Agostino,
author = {D'Agostino, Antonello and Giannone, Domenico and Lenza, Michele and Modugno, Michele},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Agostino et al. - Unknown - Nowcasting Business Cycles a Bayesian Approach to Dynamic Heterogeneous Factor Models.pdf:pdf},
keywords = {business cycles,current economic conditions,dynamic factor models,dynamic heterogeneity,nowcasting,real time},
pages = {1--24},
title = {{Nowcasting Business Cycles: a Bayesian Approach to Dynamic Heterogeneous Factor Models}}
}
@techreport{Bencivelli,
abstract = {This paper proposes the use of Bayesian model averaging (BMA) as a tool to select the predictors' set for bridge models. BMA is a computationally feasible method that allows us to explore the model space even in the presence of a large set of candidate predictors. We test the performance of BMA in now-casting by means of a recursive experiment for the euro area and the three largest countries. This method allows flexibility in selecting the information set month by month. We find that BMA based bridge models produce smaller forecast error than fixed composition bridges. In an application to the euro area they perform at least as well as medium-scale factor models.},
author = {Bencivelli, Lorenzo and Marcellino, Massimiliano and Moretti, Gianluca},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Bencivelli, Marcellino, Moretti - Unknown - Selecting predictors by using Bayesian model averaging in bridge models.pdf:pdf},
institution = {Bank of Italy, Economic Research and International Relations Area},
keywords = {Bayesian model averaging,bridge models.,business cycle analysis,forecasting},
title = {{Selecting predictors by using Bayesian model averaging in bridge models}},
type = {Temi di discussione (Economic working papers)},
url = {http://ideas.repec.org/p/bdi/wptemi/td\_872\_12.html}
}
@article{Brodersen2013,
abstract = {An important problem in econometrics and marketing is to infer the causal impact that a designed market intervention has exerted on an outcome metric over time. In order to allocate a given budget optimally, for example, an advertiser must determine the incremental contributions that different advertising campaigns have made to web searches, product installs, or sales. This paper proposes to infer causal impact on the basis of a diffusion-regression state-space model that predicts the counterfactual market response that would have occurred had no intervention taken place. In con- trast to classical difference-in-differences schemes, state-space models make it possible to (i) infer the temporal evolution of attributable impact, (ii) incorporate empirical priors on the parameters in a fully Bayesian treatment, and (iii) flexibly accommodate multiple sources of variation, including the time-varying influence of contemporane- ous covariates, i.e., synthetic controls. Using a Markov chain Monte Carlo algorithm for posterior inference, we illustrate the statistical properties of our approach on synthetic data. We then demonstrate its practical utility by evaluating the effect of an online advertising campaign on search-related site visits. We discuss the strengths and limitations of our approach in improving the accuracy of causal at- tribution, power analyses, and principled budget allocation.},
author = {Brodersen, Kay H. and Gallusser, Fabian and Koehler, Jim and Remy, Nicolas and Scott, Steven L.},
keywords = {causal inference,counterfactual,synthetic control},
number = {June},
pages = {1--32},
title = {{Inferring Causal Impact Using Bayesian Structural Time-Series Models}},
url = {http://192.249.122.146/sites/default/files/Inferring causal impact using Bayesian structural time-series models.pdf},
year = {2013}
}
@article{Carriero2012b,
abstract = {This paper develops a method for producing current-quarter forecasts of GDP growth with a (possibly large) range of available within-the-quarter monthly observations of economic indicators, such as employment and industrial production, and financial indicators, such as stock prices and interest rates. In light of existing evidence of time variation in the variances of shocks to GDP, we consider versions of the model with both constant variances and stochastic volatility. We also evaluate models with either constant or time-varying regression coefficients. We use Bayesian methods to estimate the model, in order to facilitate providing shrinkage on the (possibly large) set of model parameters and conveniently generate predictive densities. We provide results on the accuracy of nowcasts of real-time GDP growth in the U.S. from 1985 through 2011. In terms of point forecasts, our proposal is comparable to alternative econometric methods and survey forecasts. In addition, it provides reliable density forecasts, for which the stochastic volatility specification is quite useful, while parameter time-variation does not seem to matter.},
author = {Carriero, Andrea and Clark, Todd E. and Marcellino, Massimiliano},
file = {:E$\backslash$:/Dropbox/phd/bsts/Carriero\_et\_al-2015-Journal\_of\_the\_Royal\_Statistical\_Society\_\_Series\_A\_(Statistics\_in\_Society).pdf:pdf},
journal = {FRB Cleveland Working Papers},
keywords = {bayesian methods,c22,c53,e37,forecasting,mixed frequency models,prediction},
pages = {1--58},
title = {{Real-Time Nowcasting With a Bayesian Mixed Frequency Model With Stochastic Volatility}},
url = {http://www.clevelandfed.org/research/workpaper/2012/wp1227.pdf},
year = {2012}
}
@article{Carriero2011,
author = {Carriero, Andrea and Kapetanios, George and Marcellino, Massimiliano},
title = {{Forecasting large datasets with Bayesian reduced rank multivariate models}},
journal = {Journal of Applied Econometrics},
year = {2011},
volume = {26},
number = {5},
pages = {735--761},
month = aug,
doi = {10.1002/jae.1150},
issn = {08837252},
url = {http://doi.wiley.com/10.1002/jae.1150},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/Carriero\_et\_al-2011-Journal\_of\_Applied\_Econometrics.pdf:pdf}
}
@article{Clyde1999,
author = {Clyde, Merlise A.},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/Bayesian Model Averaging and Model Search Strategies.pdf:pdf},
journal = {Bayesian Statistics 6},
keywords = {generalized linear models,monte carlo,reversible jump markov chain},
pages = {157--185},
title = {{Bayesian Model Averaging and Model Search Strategies}},
year = {1999}
}
@article{Bec2013,
abstract = {This paper investigates the predictive accuracy of two alternative forecasting strategies, namely the forecast and information combinations. Theoretically, there should be no role for forecast combinations in a world where information sets can be instantaneously and costlessly combined. However, following some recent works which claim that this result holds in population but not necessarily in small samples, our paper questions this postulate empirically in a real-time and mixed-frequency framework. An application to the quarterly growth rate of French GDP reveals that, given a set of predictive models involving coincident indicators, a simple average of individual forecasts outperforms the individual forecasts, as long as no individual model encompasses the others. Furthermore, the simple average of individual forecasts outperforms, or it is statistically equivalent to, more sophisticated forecast combination schemes. However, when a predictive encompassing model is obtained by combining information sets, this model outperforms the most accurate forecast combination strategy.},
author = {Bec, Fr{\'e}d{\'e}rique and Mogliani, Matteo},
journal = {Working papers},
keywords = {Forecast Combinations,Macroeconomic Nowcasting,Mixed-frequency data.,Pooling Information,Real-time data},
title = {{Nowcasting French GDP in Real-Time from Survey Opinions: Information or Forecast Combinations?}},
url = {http://ideas.repec.org/p/bfr/banfra/436.html},
year = {2013}
}
@article{Jungbacker2011a,
abstract = {This paper concerns estimating parameters in a high-dimensional dynamic factor model by the method of maximum likelihood. To accommodate missing data in the analysis, we propose a new model representation for the dynamic factor model. It allows the Kalman filter and related smoothing methods to evaluate the likelihood function and to produce optimal factor estimates in a computationally efficient way when missing data is present. The implementation details of our methods for signal extraction and maximum likelihood estimation are discussed. The computational gains of the new devices are presented based on simulated data sets with varying numbers of missing entries. {\copyright} 2011 Elsevier B.V.},
author = {Jungbacker, B. and Koopman, S. J. and van der Wel, M.},
journal = {Journal of Economic Dynamics and Control},
keywords = {High-dimensional vector series,Kalman filtering and smoothing,Unbalanced panels of time series},
number = {8},
pages = {1358--1368},
title = {{Maximum likelihood estimation for dynamic factor models with missing data}},
volume = {35},
year = {2011}
}
@article{Ng2013a,
author = {Ng, Serena and Wright, Jonathan H.},
title = {{Facts and Challenges from the Great Recession for Forecasting and Macroeconomic Modeling}},
journal = {Journal of Economic Literature},
year = {2013},
volume = {51},
number = {4},
pages = {1120--1154},
doi = {10.1257/jel.51.4.1120},
issn = {0022-0515},
file = {:E$\backslash$:/Dropbox/phd/forecast/Facts and Challenges from the Great Recession for.pdf:pdf},
abstract = {This paper provides a survey of business cycle facts, updated to take account of recent data. Emphasis is given to the Great Recession, which was unlike most other postwar recessions in the United States in being driven by deleveraging and financial market factors. We document how recessions with financial market origins are different from those driven by supply or monetary policy shocks. This helps explain why economic models and predictors that work well at some times do poorly at other times. We discuss challenges for forecasters and empirical researchers in light of the updated business cycle facts.}
}
@article{DeMol2008,
author = {{De Mol}, Christine and Giannone, Domenico and Reichlin, Lucrezia},
title = {{Forecasting using a large number of predictors: Is Bayesian shrinkage a valid alternative to principal components?}},
journal = {Journal of Econometrics},
year = {2008},
volume = {146},
number = {2},
pages = {318--328},
month = oct,
doi = {10.1016/j.jeconom.2008.08.011},
issn = {03044076},
keywords = {Bayesian VAR,Bayesian shrinkage,C11,C13,C33,C53,Large cross-sections,Lasso regression,Principal components,Ridge regression},
url = {http://www.sciencedirect.com/science/article/pii/S0304407608001103},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/De Mol, Giannone, Reichlin - 2008 - Forecasting using a large number of predictors Is Bayesian shrinkage a valid alternative to principa.pdf:pdf},
abstract = {This paper considers Bayesian regression with normal and double-exponential priors as forecasting methods based on large panels of time series. We show that, empirically, these forecasts are highly correlated with principal component forecasts and that they perform equally well for a wide range of prior choices. Moreover, we study conditions for consistency of the forecast based on Bayesian regression as the cross-section and the sample size become large. This analysis serves as a guide to establish a criterion for setting the amount of shrinkage in a large cross-section.}
}
@article{Bates2013a,
abstract = {This paper considers the estimation of approximate dynamic factor models when there is temporal instability in the factor loadings. We characterize the type and magnitude of instabilities under which the principal components estimator of the factors is consistent and find that these instabilities can be larger than earlier theoretical calculations suggest. We also discuss implications of our results for the robustness of regressions based on the estimated factors and of estimates of the number of factors in the presence of parameter instability. Simulations calibrated to an empirical application indicate that instability in the factor loadings has a limited impact on estimation of the factor space and diffusion index forecasting, whereas estimation of the number of factors is more substantially affected.},
author = {Bates, Brandon J. and Plagborg-M{\o}ller, Mikkel and Stock, James H. and Watson, Mark W.},
doi = {10.1016/j.jeconom.2013.04.014},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/factors/Consistent factor estimation in dynamic factor models with structural instability.pdf:pdf},
issn = {03044076},
journal = {Journal of Econometrics},
month = dec,
number = {2},
pages = {289--304},
title = {{Consistent factor estimation in dynamic factor models with structural instability}},
url = {http://www.sciencedirect.com/science/article/pii/S0304407613000912},
volume = {177},
year = {2013}
}
@article{Bencivelli2010,
author = {Bencivelli, Lorenzo and Marcellino, Massimiliano and Moretti, Gianluca},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Bencivelli, Marcellino, Moretti - Unknown - Selecting predictors by using Bayesian model averaging in bridge models.pdf:pdf},
journal = {Bank of Italy Temi di Discussione},
title = {{Selecting predictors by using Bayesian model averaging in bridge models}},
year = {2010}
}
@article{Blasques2014,
author = {Blasques, Francisco and Koopman, Siem Jan and Mallee, Max},
title = {{Low Frequency and Weighted Likelihood Solutions for Mixed Frequency Dynamic Factor Models}},
year = {2014},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/Paper\_Blasques2014g.pdf:pdf}
}
@article{Brodersen2014,
author = {Brodersen, Kay H. and Gallusser, Fabian and Koehler, Jim and Remy, Nicolas and Scott, Steven L.},
file = {:E$\backslash$:/Dropbox/phd/bsts/INFERRING CAUSAL IMPACT USING BSTS.pdf:pdf},
journal = {Annals of Applied Statistics},
title = {{Inferring causal impact using Bayesian structural time-series models}},
year = {2014}
}
@article{DeMol2006,
author = {{De Mol}, Christine and Giannone, Domenico and Reichlin, Lucrezia},
title = {{Forecasting using a large number of predictors: is Bayesian regression a valid alternative to principal components?}},
journal = {Discussion Paper Series 1: Economic Studies},
year = {2006},
publisher = {Deutsche Bundesbank, Research Centre},
keywords = {Bayesian VAR,Lasso regression,large cross-sections,principal components,ridge regression},
url = {http://ideas.repec.org/p/zbw/bubdp1/5040.html},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/De Mol, Giannone, Reichlin - 2006 - Forecasting using a large number of predictors is Bayesian regression a valid alternative to princip.pdf:pdf},
abstract = {This paper considers Bayesian regression with normal and doubleexponential priors as forecasting methods based on large panels of time series. We show that, empirically, these forecasts are highly correlated with principal component forecasts and that they perform equally well for a wide range of prior choices. Moreover, we study the asymptotic properties of the Bayesian regression under Gaussian prior under the assumption that data are quasi collinear to establish a criterion for setting parameters in a large cross-section.}
}
@article{Schumacher2014,
author = {Schumacher, Christian},
title = {{MIDAS and bridge equations}},
year = {2014},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/bridgeequation/MIDAS and bridge equations.pdf:pdf}
}
@article{Stock,
abstract = {This article provides a simple shrinkage representation that describes the operational characteristics of various forecasting methods designed for a large number of orthogonal predictors (such as principal components). These methods include pretest methods, Bayesian model averaging, empirical Bayes, and bagging. We compare empirically forecasts from these methods with dynamic factor model (DFM) forecasts using a U.S. macroeconomic dataset with 143 quarterly variables spanning 1960--2008. For most series, including measures of real economic activity, the shrinkage forecasts are inferior to the DFM forecasts. This article has online supplementary material.},
author = {Stock, James H. and Watson, Mark W.},
doi = {10.1080/07350015.2012.715956},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Stock, Watson - Unknown - Generalized Shrinkage Methods for Forecasting Using Many Predictors.pdf:pdf},
journal = {Journal of Business and Economic Statistics},
keywords = {Dynamic factor models,Empirical Bayes,High-dimensional model},
number = {4},
pages = {481--493},
title = {{Generalized Shrinkage Methods for Forecasting Using Many Predictors}},
url = {http://tandfonline.com/r/JBES},
volume = {30},
year = {2012}
}
@article{Wohlrabe2014,
abstract = {The use of large datasets for macroeconomic forecasting has received a great deal of interest recently. Boosting is one possible method of using high-dimensional data for this purpose. It is a stage-wise additive modelling procedure, which, in a linear specification, becomes a variable selection device that iteratively adds the predictors with the largest contribution to the fit. Using data for the United States, the euro area and Germany, we assess the performance of boosting when forecasting a wide range of macroeconomic variables. Moreover, we analyse to what extent its forecasting accuracy depends on the method used for determining its key regularisation parameter, the number of iterations. We find that boosting mostly outperforms the autoregressive benchmark, and that {$K$}-fold cross-validation works much better as stopping criterion than the commonly used information criteria. (This abstract was borrowed from another version of this item.)},
author = {Wohlrabe, Klaus and Buchen, Teresa},
file = {:E$\backslash$:/Dropbox/phd/shrinkage/boosting/Assessing the Macroeconomic Forecasting Performance of Boosting Evidence for the United States, the Euro Area, and Germany.pdf:pdf},
journal = {Journal of Forecasting},
number = {4},
pages = {231--242},
publisher = {John Wiley \& Sons, Ltd.},
title = {{Assessing the Macroeconomic Forecasting Performance of Boosting: Evidence for the United States, the Euro Area and Germany}},
url = {http://ideas.repec.org/a/wly/jforec/v33y2014i4p231-242.html},
volume = {33},
year = {2014}
}
@book{Wooldridge2002,
abstract = {This graduate text provides an intuitive but rigorous treatment of contemporary methods used in microeconometric research. The book makes clear that applied microeconometrics is about the estimation of marginal and treatment effects, and that parametric estimation is simply a means to this end. It also clarifies the distinction between causality and statistical association.The book focuses specifically on cross section and panel data methods. Population assumptions are stated separately from sampling assumptions, leading to simple statements as well as to important insights. The unified approach to linear and nonlinear models and to cross section and panel data enables straightforward coverage of more advanced methods. The numerous end-of-chapter problems are an important component of the book. Some problems contain important points not fully described in the text, and others cover new ideas that can be analyzed using tools presented in the current and previous chapters. Several problems require the use of the data sets located at the author's website.},
address = {Cambridge, MA},
author = {Wooldridge, Jeffrey M.},
isbn = {0262232197},
pages = {752},
publisher = {MIT Press},
title = {{Econometric Analysis of Cross Section and Panel Data}},
url = {http://books.google.com/books?hl=en\&lr=\&id=cdBPOJUP4VsC\&oi=fnd\&pg=PA1\&dq=Econometric+Analysis+of+Cross+Section+and+Panel+Data\&ots=jabbMTk6sf\&sig=lBc6Dsy959N2dTJD42bbNQoBzEY},
year = {2002}
}
@book{Cameron2005,
author = {Cameron, A. Colin and Trivedi, Pravin K.},
isbn = {9780521848053},
pages = {1056},
publisher = {Cambridge University Press},
series = {Cambridge Books},
title = {{Microeconometrics: Methods and Applications}},
url = {http://books.google.com/books?hl=en\&lr=\&id=Zf0gCwxC9ocC\&oi=fnd\&pg=PR15\&dq=Microeconometrics+Methods+and+Applications\&ots=CY25lK3EqT\&sig=EU9ni1rMTBFUc25MBWG3nuKLol0},
year = {2005}
}
@book{Durbin2012,
author = {Durbin, James and Koopman, Siem Jan},
edition = {Second},
file = {:E$\backslash$:/Dropbox/phd/statespace/[Durbin\_J.,\_Koopman\_S.J.]\_Time\_Series\_Analysis\_by\_(BookZZ.org).pdf:pdf},
publisher = {Oxford University Press},
title = {{Time series analysis by state space methods}},
url = {http://books.google.com/books?hl=en\&lr=\&id=fOq39Zh0olQC\&oi=fnd\&pg=PP2\&dq=time+series+analysis+by+state+space+methods\&ots=o95cm4AVyi\&sig=2jtVmFvgu0ZJ8d0UOE8skg0CBr4},
year = {2012}
}
@article{Ferrara2013,
author = {Ferrara, Laurent and Marsilli, Cl{\'e}ment},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/MidasVariableSelection\_Oct13.pdf:pdf},
keywords = {bayesian variable selection,forecasting,lasso,midas},
title = {{Variable selection with mixed frequencies: an assessment based on macroeconomic forecasting}},
year = {2013}
}
@article{Ferrara2014,
author = {Ferrara, Laurent and Marsilli, Cl\'{e}ment and Ortega, Juan Pablo},
title = {{Forecasting growth during the Great Recession: Is financial volatility the missing ingredient?}},
journal = {Economic Modelling},
year = {2014},
volume = {36},
pages = {44--50},
publisher = {Elsevier B.V.},
doi = {10.1016/j.econmod.2013.08.042},
issn = {02649993},
keywords = {Financial variables,Forecasting,Great Recession,MIDAS approach,Volatility},
url = {http://dx.doi.org/10.1016/j.econmod.2013.08.042},
abstract = {The Great Recession endured by the main industrialized countries during the period 2008-2009, in the wake of the financial and banking crisis, has pointed out the major role of the financial sector on macroeconomic fluctuations. In this respect, many researchers have started to reconsider the linkages between financial and macroeconomic areas. In this paper, we evaluate the leading role of the daily volatility of two major financial variables, namely commodity and stock prices, in their ability to anticipate the output growth. For this purpose, we propose an extended MIDAS model that allows the forecasting of the quarterly output growth rate using exogenous variables sampled at various higher frequencies. Empirical results on three industrialized countries (US, France, and UK) show that mixing daily financial volatilities and monthly industrial production is useful at the time of predicting gross domestic product growth over the Great Recession period. © 2013 Elsevier B.V.}
}
@article{Koop2004a,
author = {Koop, Gary and Potter, Simon},
title = {{Forecasting in dynamic factor models using Bayesian model averaging}},
journal = {The Econometrics Journal},
year = {2004},
volume = {7},
number = {2},
pages = {550--565},
month = dec,
doi = {10.1111/j.1368-423X.2004.00143.x},
issn = {1368-4221},
url = {http://doi.wiley.com/10.1111/j.1368-423X.2004.00143.x}
}
@book{Petris2008,
author = {Petris, Giovanni and Petrone, Sonia and Campagnoli, Patrizia},
file = {:E$\backslash$:/Dropbox/phd/statespace/Dynamic Linear Models with R[Giovanni\_Petris,\_Sonia\_Petrone,\_Patrizia\_Campagno(BookZZ.org).pdf:pdf},
isbn = {978-0-387-77237-0},
pages = {252},
publisher = {Springer},
title = {{Dynamic Linear Models with R}},
url = {http://www.springer.com/statistics/statistical+theory+and+methods/book/978-0-387-77237-0},
year = {2008}
}
@article{Stock2006,
author = {Stock, James H. and Watson, Mark W.},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/factor/dfm\_oup\_4.pdf:pdf},
isbn = {9780195398649},
journal = {Oxford Handbook of Economic Forecasting},
number = {January},
pages = {1--43},
title = {{Dynamic factor models}},
url = {http://link.springer.com/article/10.1007/s10182-006-0219-z},
year = {2006}
}
@article{Stock2012,
abstract = {This paper provides a simple shrinkage representation that describes the operational characteristics of various forecasting methods designed for a large number of orthogonal predictors (such as principal components). These methods include pretest methods, Bayesian model averaging, empirical Bayes, and bagging. We compare empirically forecasts from these methods to dynamic factor model (DFM) forecasts using a U.S. macroeconomic data set with 143 quarterly variables spanning 1960-2008. For most series, including measures of real economic activity, the shrinkage forecasts are inferior to the DFM forecasts},
author = {Stock, James H. and Watson, Mark W.},
doi = {10.1080/07350015.2012.715956},
file = {:E$\backslash$:/Dropbox/phd/statespace/dfm/dfmdata/stock\_watson\_generalized\_shrinkage\_supplement\_June\_2012.pdf:pdf},
issn = {0735-0015},
journal = {Journal of Business and Economic Statistics},
keywords = {dynamic factor models,empirical Bayes,high dimensional model},
number = {4},
pages = {481--493},
title = {{Generalized shrinkage methods for forecasting using many predictors}},
url = {http://amstat.tandfonline.com/doi/abs/10.1080/07350015.2012.715956},
volume = {30},
year = {2012}
}
@article{Taieb2014,
abstract = {Multi-step forecasts can be produced recursively by iterating a one-step model, or directly using a specific model for each horizon. Choosing between these two strategies is not an easy task since it involves a trade-off between bias and estimation variance over the forecast horizon. Using a nonlinear machine learning model makes the tradeoff even more difficult. To address this issue, we propose a new forecasting strategy which boosts traditional recursive linear forecasts with a direct strategy using a boosting autoregression procedure at each horizon. First, we investigate the performance of the proposed strategy in terms of bias and variance decomposition of the error using simulated time series. Then, we evaluate the proposed strategy on real-world time series from two forecasting competitions. Overall, we obtain excellent performance with respect to the standard forecasting strategies.},
author = {Ben Taieb, Souhaib and Hyndman, Rob J.},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Taieb, Hyndman - 2014 - Boosting multi-step autoregressive forecasts.pdf:pdf},
journal = {Monash Econometrics and Business Statistics Working Papers},
keywords = {Multi-step forecasting,boosting,direct forecasting,forecasting strategies,linear time series,nonlinear time series,recursive forecasting},
publisher = {Monash University, Department of Econometrics and Business Statistics},
title = {{Boosting multi-step autoregressive forecasts}},
url = {http://ideas.repec.org/p/msh/ebswps/2014-13.html},
year = {2014}
}
@article{Toda1995,
abstract = {This paper shows how we can estimate VAR's formulated in levels and test general restrictions on the parameter matrices even if the processes may be integrated or cointegrated of an arbitrary order. We can apply a usual lag selection procedure to a possibly integrated or cointegrated VAR since the standard asymptotic theory is valid (as far as the order of integration of the process does not exceed the true lag length of the model). Having determined a lag length k, we then estimate a (k + dmax)th-order VAR where dmax is the maximal order of integration that we suspect might occur in the process. The coefficient matrices of the last dmax lagged vectors in the model are ignored (since these are regarded as zeros), and we can test linear or nonlinear restrictions on the first k coefficient matrices using the standard asymptotic theory.},
author = {Toda, Hiro Y. and Yamamoto, Taku},
doi = {10.1016/0304-4076(94)01616-8},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Toda, Yamamoto - 1995 - Statistical inference in vector autoregressions with possibly integrated processes.pdf:pdf},
issn = {03044076},
journal = {Journal of Econometrics},
keywords = {C32,Cointegration,Hypothesis testing,Lag order selection,Unit roots,VAR test for Granger causality,Vector autoregressions},
mendeley-tags = {VAR test for Granger causality},
month = mar,
number = {1-2},
pages = {225--250},
title = {{Statistical inference in vector autoregressions with possibly integrated processes}},
url = {http://www.sciencedirect.com/science/article/pii/0304407694016168},
volume = {66},
year = {1995}
}
@article{VanOudenhoven2002,
abstract = {The present study considered the reliability and validity of the 78-item revised version of the Multicultural Personality Questionnaire, a multidimensional instrument aimed at measuring multicultural effectiveness of expatriate employees and students. The questionnaire includes scales for cultural empathy, open-mindedness, emotional stability, social initiative and flexibility. Participants were native and foreign students of an international business school (N=171) in the Netherlands. The MPQ scales appeared to be more strongly predictive of adjustment of international students as compared to native students. Moreover, the instrument was able to explain variance in students' adjustment beyond self-efficacy.},
author = {{Van Oudenhoven}, Jan Pieter and {Van der Zee}, Karen I},
doi = {10.1016/S0147-1767(02)00041-X},
issn = {01471767},
journal = {International Journal of Intercultural Relations},
keywords = {Cultural empathy,Emotional stability,Flexibility,International students,Multicultural Personality Questionnaire,Multicultural effectiveness,Open-mindedness,Social Initiative},
month = nov,
number = {6},
pages = {679--694},
title = {{Predicting multicultural effectiveness of international students: the Multicultural Personality Questionnaire}},
url = {http://www.sciencedirect.com/science/article/pii/S014717670200041X},
volume = {26},
year = {2002}
}
@article{Vanhoucke2004,
author = {Vanhoucke, Mario},
isbn = {9789251081518},
number = {January},
title = {{Working Paper}},
year = {2004}
}
@article{Acemoglu2001,
abstract = {We exploit differences in European mortality rates to estimate the effect of institutions on economic performance. Europeans adopted very different colonization policies in different colonies, with different associated institutions. In places where Europeans faced high mortality rates, they could not settle and were more likely to set up extractive institutions. These institutions persisted to the present. Exploiting differences in European mortality rates as an instrument for current institutions, we estimate large effects of institutions on income per capita. Once the effect of institutions is controlled for, countries in Africa or those closer to the equator do not have lower incomes.},
author = {Acemoglu, Daron and Johnson, Simon and Robinson, James A},
journal = {American Economic Review},
doi = {10.1257/aer.91.5.1369},
isbn = {00028282},
issn = {0002-8282},
number = {5},
pages = {1369--1401},
pmid = {17746758},
title = {{The Colonial Origins of Comparative Development: An Empirical Investigation}},
volume = {91},
year = {2001}
}
@article{Doz2006,
abstract = {This paper considers quasi-maximum likelihood estimations of a dynamic approximate factor model when the panel of time series is large. Maximum likelihood is analyzed under different sources of misspecification: omitted serial correlation of the observations and cross-sectional correlation of the idiosyncratic components. It is shown that the effects of misspecification on the estimation of the common factors is negligible for large sample size (T) and the cross-sectional dimension (n). The estimator is feasible when n is large and easily implementable using the Kalman smoother and the EM algorithm as in traditional factor analysis. Simulation results illustrate what are the empirical conditions in which we can expect improvement with respect to simple principal components considered by Bai (2003), Bai and Ng (2002), Forni, Hallin, Lippi, and Reichlin (2000, 2005b), Stock and Watson (2002a,b).},
author = {Doz, Catherine and Giannone, Domenico and Reichlin, Lucrezia},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/DYNAMIC FACTORecbwp674.pdf:pdf},
keywords = {Factor Model,Quasi Maximum Likelihood.,large cross-sections},
number = {674},
title = {{A quasi maximum likelihood approach for large approximate dynamic factor models}},
year = {2006}
}
@article{Foroni2014b,
author = {Foroni, Claudia and Gu{\'e}rin, Pierre and Marcellino, Massimiliano},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Markov-Switching Mixed-Frequency VAR Models.pdf:pdf},
keywords = {fore-,markov-switching,midas,mixed-frequency var,nowcasting},
title = {{Markov-Switching Mixed-Frequency VAR Models}},
year = {2014}
}
@article{Hendry2011,
abstract = {To forecast an aggregate, we propose adding disaggregate variables, instead of combining forecasts of those disaggregates or forecasting by a univariate aggregate model. New analytical results show the effects of changing coefficients, mis-specification, estimation uncertainty and mis-measurement error. Forecast-origin shifts in parameters affect absolute, but not relative, forecast accuracies; mis-specification and estimation uncertainty induce forecast-error differences, which variable-selection procedures or dimension reductions can mitigate. In Monte Carlo simulations, different stochastic structures and interdependencies between disaggregates imply that including disaggregate information in the aggregate model improves forecast accuracy. Our theoretical predictions and simulations are corroborated when forecasting aggregate US inflation pre- and post 1984 using disaggregate sectoral data. JEL Classification: C51, C53, E31.},
author = {Hendry, David F. and Hubrich, Kirstin},
journal = {Journal of Business \& Economic Statistics},
doi = {10.1198/jbes.2009.07112},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Hendry, Hubrich - 2011 - Combining Disaggregate Forecasts or Combining Disaggregate Information to Forecast an Aggregate.pdf:pdf},
issn = {0735-0015},
title = {{Combining Disaggregate Forecasts or Combining Disaggregate Information to Forecast an Aggregate}},
year = {2011}
}
@article{Hendry2014,
abstract = {Big Data offer potential benefits for statistical modelling, but confront problems like an excess of false positives, mistaking correlations for causes, ignoring sampling biases, and selecting by inappropriate methods. We consider the many important requirements when searching for a data-based relationship using Big Data, and the possible role of Autometrics in that context. Paramount considerations include embedding relationships in general initial models, possibly restricting the number of variables to be selected over by non-statistical criteria (the formulation problem), using good quality data on all variables, analyzed with tight significance levels by a powerful selection procedure, retaining available theory insights (the selection problem) while testing for relationships being well specified and invariant to shifts in explanatory variables (the evaluation problem), using a viable approach that resolves the computational problem of immense numbers of possible models.},
author = {Hendry, David and Doornik, Jurgen A.},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Hendry, Doornik - 2014 - Statistical Model Selection with 'Big Data'.pdf:pdf},
journal = {Economics Series Working Papers},
keywords = {Autometrics,Big Data,Location Shifts,Model Selection},
month = dec,
publisher = {University of Oxford, Department of Economics},
title = {{Statistical Model Selection with 'Big Data'}},
url = {http://ideas.repec.org/p/oxf/wpaper/735.html},
year = {2014}
}
@article{Marcellino2007,
author = {Marcellino, Massimiliano and Schumacher, Christian},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/Factor nowcasting of German GDP with ragged-edge.pdf:pdf},
keywords = {authors,business cycle,data,ect the views of,large factor models,midas,missing values,mixed-frequency,not necessarily re,nowcasting,personal opinions and does,the,this paper represents the},
pages = {1--45},
title = {{Factor nowcasting of German GDP with ragged-edge data: A model comparison using MIDAS projections}},
year = {2007}
}
@article{Mourougane2006,
abstract = {The objective of this paper is to develop a short-term indicator-based model to predict quarterly GDP in Canada by efficiently exploiting all available monthly information. To this aim, monthly forecasting equations are estimated using the GDP series published every month by Statistics Canada as well as other monthly indicators. The procedures are automated and the model can be run whenever major monthly data are released, allowing the appropriate choice of the model according to the information set available. The most important gain from this procedure is for the current-quarter forecast when one or two months of GDP data are available, with all monthly models estimated in the paper outperforming a standard quarterly autoregressive model in terms of size of errors. The use of indicators also appears to improve forecasting performance, especially when an average of indicator-based models is used. Real-time forecasting performance of the average model appear to be good, with an apparent stability of the estimates from one update to the next, despite the extensive use of monthly data. The latter result should nonetheless be interpreted with caution and will need to be re-assessed when more data become available.},
author = {Mourougane, Annabelle},
file = {:E$\backslash$:/Dropbox/phd/forecast/Canada/Forecasting Monthly GDP For Canada.pdf:pdf},
number = {515},
title = {{Forecasting monthly GDP for Canada}},
url = {http://ideas.repec.org/p/oec/ecoaaa/515-en.html},
year = {2006}
}
@article{Uhl2011,
abstract = {In this paper we extend the targeted-regressor approach suggested in Bai and Ng (2008) for variables sampled at the same frequency to mixed-frequency data. Our MIDASSO approach is a combination of the unrestricted MIxed-frequency DAta-Sampling approach (U-MIDAS) (see Foroni et al., 2015; Castle et al., 2009; Bec and Mogliani, 2013), and the LASSO-type penalised regression used in Bai and Ng (2008), called the elastic net (Zou and Hastie, 2005). We illustrate our approach by forecasting the quarterly real GDP growth rate in Switzerland.},
author = {Siliverstovs, Boriss},
file = {:E$\backslash$:/Dropbox/phd/forecast/Short-term forecasting with mixed-frequency data A MIDASSO approach.pdf:pdf},
title = {{Short-term forecasting with mixed-frequency data: A MIDASSO approach}},
url = {http://www.kof.ethz.ch/en/publications/p/kof-working-papers/158/},
year = {2015}
}
@article{Time1997,
author = {Time, Structural and Models, Series and Filter, Kalman},
number = {1994},
title = {{State Space Models and the Kalman Filter}},
year = {1997}
}
@article{Bayesian2000,
author = {Bayesian, Applied},
number = {1997},
title = {{11 . Time series and dynamic linear models}},
year = {2000}
}
@article{Jungbacker2011,
abstract = {This paper concerns estimating parameters in a high-dimensional dynamic factor model by the method of maximum likelihood. To accommodate missing data in the analysis, we propose a new model representation for the dynamic factor model. It allows the Kalman filter and related smoothing methods to evaluate the likelihood function and to produce optimal factor estimates in a computationally efficient way when missing data is present. The implementation details of our methods for signal extraction and maximum likelihood estimation are discussed. The computational gains of the new devices are presented based on simulated data sets with varying numbers of missing entries.},
author = {Jungbacker, B. and Koopman, S.J. and van der Wel, M.},
journal = {Journal of Economic Dynamics and Control},
doi = {10.1016/j.jedc.2011.03.009},
issn = {01651889},
number = {8},
pages = {1358--1368},
title = {{Maximum likelihood estimation for dynamic factor models with missing data}},
volume = {35},
year = {2011}
}
@article{Kahneman1979,
abstract = {This paper presents a critique of expected utility theory as a descriptive model of decision making under risk, and develops an alternative model, called prospect theory. Choices among risky prospects exhibit several pervasive effects that are inconsistent with the basic tenets of utility theory. In particular, people underweight outcomes that are merely probable in comparison with outcomes that are obtained with certainty. This tendency, called the certainty effect, contributes to risk aversion in choices involving sure gains and to risk seeking in choices involving sure losses. In addition, people generally discard components that are shared by all prospects under consideration. This tendency, called the isolation effect, leads to inconsistent preferences when the same choice is presented in different forms. An alternative theory of choice is developed, in which value is assigned to gains and losses rather than to final assets and in which probabilities are replaced by decision weights. The value function is normally concave for gains, commonly convex for losses, and is generally steeper for losses than for gains. Decision weights are generally lower than the corresponding probabilities, except in the range of low probabilities. Overweighting of low probabilities may contribute to the attractiveness of both insurance and gambling.},
author = {Kahneman, Daniel and Tversky, Amos},
doi = {10.2307/1914185},
isbn = {0521627494},
issn = {00129682},
journal = {Econometrica},
number = {2},
pages = {263--292},
pmid = {1914185},
title = {{Prospect Theory: An Analysis of Decision under Risk}},
url = {http://medcontent.metapress.com/index/A65RM03P4874243N.pdf},
volume = {47},
year = {1979}
}
@article{Stock2004,
abstract = {This paper provides a simple shrinkage representation that describes the operational characteristics of various forecasting methods that are applicable when there are a large number of orthogonal predictors (such as principal components). These methods include pretest methods, Bayesian model averaging, empirical Bayes, and bagging. We then compare these and other many-predictor forecasting methods in the context of macroeconomic forecasting (real activity and inflation) using 131 monthly predictors with monthly U.S. economic time series data, 1959:1 -2003:12. The theoretical shrinkage representations serve to inform our empirical comparison of these forecasting methods.},
author = {Stock, James H and Watson, Mark W},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Stock et al. - 2004 - AN EMPIRICAL COMPARISON OF METHODS FOR FORECASTING USING MANY PREDICTORS.pdf:pdf},
title = {{An Empirical Comparison of Methods for Forecasting Using Many Predictors}},
year = {2004}
}
@article{Castle2012,
abstract = {We consider forecasting with factors, variables and both, modeling in-sample using Autometrics so all principal components and variables can be included jointly, while tackling multiple breaks by impulse-indicator saturation. A forecast-error taxonomy for factor models highlights the impacts of location shifts on forecast-error biases. Forecasting US GDP over 1-, 4- and 8-step horizons using the dataset from Stock and Watson (2009) updated to 2011:2 shows factor models are more useful for nowcasting or short-term forecasting, but their relative performance declines as the forecast horizon increases. Forecasts for GDP levels highlight the need for robust strategies such as intercept corrections or differencing when location shifts occur, as in the recent financial crisis.},
author = {Castle, Jennifer and Hendry, David},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Castle, Hendry - 2012 - Forecasting by factors, by variables, or both.pdf:pdf},
journal = {Economics Series Working Papers},
keywords = {Autometrics,Factor models,Forecasting,Impulse-indicator saturation,Model selection},
month = apr,
publisher = {University of Oxford, Department of Economics},
title = {{Forecasting by factors, by variables, or both?}},
url = {http://ideas.repec.org/p/oxf/wpaper/600.html},
year = {2012}
}
@article{Eiswerth2007,
author = {Eiswerth, Mark E. and van Kooten, G. Cornelis},
doi = {10.1111/j.1744-7976.2007.00104.x},
issn = {0008-3976},
journal = {Canadian Journal of Agricultural Economics/Revue canadienne d'agro{\'e}conomie},
month = dec,
number = {4},
pages = {485--498},
title = {{Dynamic Programming and Learning Models for Management of a Nonnative Species}},
url = {http://doi.wiley.com/10.1111/j.1744-7976.2007.00104.x},
volume = {55},
year = {2007}
}
@article{Giacomini,
author = {Giacomini, Raffaella},
file = {:E$\backslash$:/Dropbox/phd/forecast/economicTheoryForecast.pdf:pdf},
title = {{Economic theory and forecasting: lessons from the literature}}
}
@article{Ortega2014,
author = {Ortega, Juan-Pablo and Bauwens, Luc},
title = {{Mixed-frequency modeling and economic forecasting}},
year = {2014}
}
@article{Tusell2011,
abstract = {Support in R for state space estimation viaKalman filtering was limited to one package, until fairly recently. In the last five years, the situation has changed with no less than four additional packages offering general implementations of theKalman filter, including in some cases smoothing, simulation smoothing and other functionality. This paper reviews some of the offerings in R to help the prospective user to make an informed choice.},
author = {Tusell, Fernando},
issn = {15487660},
journal = {Journal of Statistical Software},
keywords = {kalman filter,r,state space models,time series},
number = {2},
pages = {1--27},
pmid = {18291371},
title = {{Kalman Filtering in R}},
url = {http://www.jstatsoft.org/v39/i02/paper},
volume = {39},
year = {2011}
}
@book{Greene2003,
abstract = {Econometric Analysis, 6/e, serves as a bridge between an introduction to the field of econometrics and the professional literature for social scientists and other professionals in the field of social sciences, focusing on applied econometrics and theoretical background. This book provides a broad survey of the field of econometrics that allows the reader to move from here to practice in one or more specialized areas. At the same time, the reader will gain an appreciation of the common foundation of all the fields presented and use the tools they employ. This book gives space to a wide range of topics including basic econometrics, Classical, Bayesian, GMM, and Maximum likelihood, and gives special emphasis to new topics such as time series and panels. For social scientists and other professionals in the field who want a thorough introduction to applied econometrics that will prepare them for advanced study and practice in the field.},
author = {Greene, William H},
booktitle = {Journal of the American Statistical Association},
doi = {10.1198/jasa.2002.s458},
editor = {Education, Pearson},
isbn = {0130661899},
issn = {01621459},
number = {457},
pages = {1026},
pmid = {21414993},
publisher = {Prentice Hall},
title = {{Econometric Analysis}},
url = {http://pubs.amstat.org/doi/abs/10.1198/jasa.2002.s458},
volume = {97},
year = {2003}
}
@article{Harvey,
abstract = {By setting up a suitable time series model in state space form, the latest estimate of the underlying current change in a series may be computed by the Kalman filter. This may be done even if the observations are only available in a time-aggregated form subject to survey sampling error. A related series, possibly observed more frequently, may be used to improve the estimate of change further. The paper applies these techniques to the important problem of estimating the underlying monthly change in unemployment in the UK measured according to the definition of the International Labour Organisation by the Labour Force Survey. The fitted models suggest a reduction in root-mean-squared error of around 10\% over a simple estimate based on differences if a univariate model is used and a further reduction of 50\% if information on claimant counts is taken into account. With seasonally unadjusted data, the bivariate model offers a gain of roughly 40\% over the use of annual differences. For both adjusted and unadjusted data, there is a further gain of around 10\% if the next month's figure on claimant counts is used. The method preferred is based on a bivariate model with unadjusted data. If the next month's claimant count is known, the root-mean-squared error for the estimate of change is just over 10 000.},
author = {Harvey, Andrew and Chung, Chia-Hui},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Harvey, Chung - Unknown - Estimating the underlying change in unemployment in the UK.pdf:pdf},
keywords = {Co-integration,Common trends,Kalman filter,Mixed frequency data,Rotating sample,Survey data},
title = {{Estimating the underlying change in unemployment in the UK}}
}
@article{Matter2009b,
author = {Marsilli, Cl{\'e}ment},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/variableselection/c\_marsilli\_Nov\_2014.pdf:pdf},
number = {December},
title = {{Variable Selection in Predictive MIDAS Models}},
year = {2014}
}
@article{Cochrane1994,
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Unknown - Unknown - Cochrane Perm and Trans GNP and stocks (QJE).pdf:pdf},
title = {{Cochrane Perm and Trans GNP and stocks (QJE)}}
}
@article{Andreou2013a,
author = {Andreou, Elena and Ghysels, Eric and Kourtellos, Andros},
doi = {10.1080/07350015.2013.767199},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Andreou, Ghysels, Kourtellos - 2013 - Should Macroeconomic Forecasters Use Daily Financial Data and How(3).pdf:pdf},
isbn = {0735-0015, 1537-2707},
issn = {0735-0015},
journal = {Journal of Business \& Economic Statistics},
keywords = {daily financial factors,financial markets and the,macroeconomy,midas regressions},
number = {2},
pages = {240--251},
title = {{Should Macroeconomic Forecasters Use Daily Financial Data and How?}},
url = {http://www.tandfonline.com/doi/abs/10.1080/07350015.2013.767199},
volume = {31},
year = {2013}
}
@book{NBERgree13-1,
author = {Goldfarb, Avi and Greenstein, Shane and Tucker, Catherine},
institution = {National Bureau of Economic Research},
publisher = {University of Chicago Press},
title = {{Economic Analysis of the Digital Economy}},
url = {http://www.nber.org/books/gree13-1},
year = {2015}
}
@article{Bai2012,
author = {Bai, Jushan and Wang, Peng},
doi = {10.1080/07350015.2014.941467},
keywords = {10027,columbia university,department of economics,dynamic factor models,impulse,multi-level factor model,new york,ny,response function,spill-over effects},
pages = {1--60},
title = {{Identification and Estimation of Dynamic Factor Models}},
year = {2012}
}
@article{BURA2010a,
author = {Ba{\'n}bura, Marta and Giannone, Domenico and Reichlin, Lucrezia},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/VAR/Large VARs Bandura Giannone Reichlin.pdf:pdf},
issn = {08837252},
journal = {Journal of Applied Econometrics},
number = {1},
pages = {71--92},
title = {{Large Bayesian Vector Auto Regressions}},
volume = {25},
year = {2010}
}
@article{Clements2008,
abstract = {Many macroeconomic series, such as U.S. real output growth, are sampled quarterly, although potentially useful predictors are often observed at a higher frequency. We look at whether a mixed data-frequency sampling (MIDAS) approach can improve forecasts of output growth. The MIDAS specification used in the comparison uses a novel way of including an autoregressive term. We find that the use of monthly data on the current quarter leads to significant improvement in forecasting current and next quarter output growth, and that MIDAS is an effective way to exploit monthly data compared with alternative methods.},
author = {Clements, Michael P and Galv\~{a}o, Ana Beatriz},
doi = {10.1198/073500108000000015},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/AR-MIDAS/Forecasting Output Growth in the United States.pdf:pdf},
issn = {0735-0015},
journal = {Journal of Business \& Economic Statistics},
keywords = {Forecasting,Mixed-frequency data,U.S. output growth},
language = {en},
month = oct,
number = {4},
pages = {546--554},
publisher = {Taylor \& Francis},
title = {{Macroeconomic Forecasting With Mixed-Frequency Data: Forecasting Output Growth in the United States}},
url = {http://www.tandfonline.com/doi/abs/10.1198/073500108000000015},
volume = {26},
year = {2008}
}
@article{Heckman1979,
abstract = {discusses the bias that results from using nonrandomly selected samples to estimate behavioral relationships as an ordinary specification error or “omitted variable” bias; simple consistent two-stage estimator is considered},
author = {Heckman, James J.},
doi = {10.2307/1912352},
isbn = {00129682},
issn = {00129682},
journal = {Econometrica},
number = {1},
pages = {153--161},
pmid = {1912352},
title = {{Sample Selection Bias as a Specification Error}},
volume = {47},
year = {1979}
}
@article{Hendry2012,
abstract = {Understanding the workings of whole economies is essential for sound policy advice - but not necessarily for accurate forecasts. Structural models play a major role at most central banks and many other governmental agencies, yet almost none forecast the financial crisis and ensuing recession. We focus on the problem of forecast failure that has become prominent during and after that crisis, and illustrate its sources and many surprising implications using a simple model. An application to 'forecasting' UK GDP over 2008(1)-2011(2) is consistent with our interpretation.},
author = {Hendry, David and Mizon, Grayham E.},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Hendry, Mizon - 2012 - Forecasting from Structural Econometric Models.pdf:pdf},
journal = {Economics Series Working Papers},
keywords = {Autometrics,Economic forecasting,Location shifts,Structural models},
month = mar,
publisher = {University of Oxford, Department of Economics},
title = {{Forecasting from Structural Econometric Models}},
url = {http://ideas.repec.org/p/oxf/wpaper/597.html},
year = {2012}
}
@article{Marcellino2007a,
author = {Marcellino, Massimiliano and Schumacher, Christian},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/01 slide Factor nowcasting of German GDP.pdf:pdf},
keywords = {authors,business cycle,data,ect the views of,large factor models,midas,missing values,mixed-frequency,not necessarily re,nowcasting,personal opinions and does,the,this paper represents the},
pages = {1--45},
title = {{Factor nowcasting of German GDP with ragged-edge data: A model comparison using MIDAS projections}},
year = {2007}
}
@article{Marcellino2010,
abstract = {This paper compares different ways to estimate the current state of the economy using factor models that can handle unbalanced datasets. Due to the different release lags of business cycle indicators, data unbalancedness often emerges at the end of multivariate samples, which is sometimes referred to as the 'ragged edge' of the data. Using a large monthly dataset of the German economy, we compare the performance of different factor models in the presence of the ragged edge: static and dynamic principal components based on realigned data, the Expectation-Maximisation (EM) algorithm and the Kalman smoother in a state-space model context. The monthly factors are used to estimate current quarter GDP, called the 'nowcast', using different versions of what we call factor-based mixed-data sampling (Factor-MIDAS) approaches. We compare all possible combinations of factor estimation methods and Factor-MIDAS projections with respect to nowcast performance. Additionally, we compare the performance of the nowcast factor models with the performance of quarterly factor models based on time-aggregated and thus balanced data, which neglect the most timely observations of business cycle indicators at the end of the sample. Our empirical findings show that the factor estimation methods don't differ much with respect to nowcasting accuracy. Concerning the projections, the most parsimonious MIDAS projection performs best overall. Finally, quarterly models are in general outperformed by the nowcast factor models that can exploit ragged-edge data.},
author = {Marcellino, Massimiliano and Schumacher, Christian},
doi = {10.1111/j.1468-0084.2010.00591.x},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/Factor MIDAS for Nowcasting and Forecasting.pdf:pdf},
isbn = {0305-9049},
issn = {03059049},
journal = {Oxford Bulletin of Economics and Statistics},
number = {4},
pages = {518--550},
title = {{Factor MIDAS for nowcasting and forecasting with ragged-edge data: A model comparison for German GDP}},
volume = {72},
year = {2010}
}
@article{Stock2005a,
abstract = {This paper considers VAR models incorporating many time series that interact through a few dynamic factors. Several econometric issues are addressed including estimation of the number of dynamic factors and tests for the factor restrictions imposed on the VAR. Structural VAR identification based on timing restrictions, long run restrictions, and restrictions on factor loadings are discussed and practical computational methods suggested. Empirical analysis using U.S. data suggest several (7) dynamic factors, rejection of the exact dynamic factor model but support for an approximate factor model, and sensible results for a SVAR that identifies money policy shocks using timing restrictions.},
author = {Stock, J.H. and Watson, M.W.},
file = {:E$\backslash$:/Dropbox/phd/statespace/dfm/dfmdata/favar.pdf:pdf},
journal = {NBER working paper},
number = {June},
title = {{Implications of Dynamic Factor Models for VAR Analysis}},
url = {http://papers.ssrn.com/sol3/papers.cfm?abstract\_id=755703},
year = {2005}
}
@article{Varian,
abstract = {Computers are now involved in many economic transactions and can capture data associated with these transactions, which can then be manipulated and analyzed. Conventional statistical and econometric techniques such as regression often work well, but there are issues unique to big datasets that may require different tools. First, the sheer size of the data involved may require more powerful data manipulation tools. Second, we may have more potential predictors than appropriate for estimation, so we need to do some kind of variable selection. Third, large datasets may allow for more flexible relationships than simple linear models. Machine learning techniques such as decision trees, support vector machines, neural nets, deep learning, and so on may allow for more effective ways to model complex relationships. In this essay, I will describe a few of these tools for manipulating and analyzing big data. I believe that these methods have a lot to offer and should be more widely known and used by economists. In fact, my standard advice to graduate students these days is go to the computer science department and take a class in machine learning. There have been very fruitful collaborations between computer scientists and statisticians in the last decade or so, and I expect collaborations between computer scientists and econometricians will also be productive in the future.},
author = {Varian, Hal R.},
doi = {10.1257/jep.28.2.3},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Varian - Unknown - Big Data New Tricks for Econometrics Tools to Manipulate Big Data.pdf:pdf},
journal = {Journal of Economic Perspectives},
number = {2},
pages = {3--28},
title = {{Big Data: New Tricks for Econometrics}},
url = {http://dx.doi.org/10.1257/jep.28.2.3},
volume = {28},
year = {2014}
}
@misc{Nolan2009,
author = {Nolan, James and Parker, Dawn and {van Kooten}, G. Cornelis and Berger, Thomas},
internal-note = {key and metadata reconstructed from the URL; original entry had an empty citation key and a filename as title -- verify against the PDF},
title = {{An Overview of Computational Modeling in Agricultural and Resource Economics}},
url = {http://www2.econ.iastate.edu/tesfatsi/AgResEconCompModelingOverview.NolanParkerVanKootenBerger2009.pdf},
urldate = {2014-08-15},
year = {2009}
}
@article{Galbraith2014,
author = {Galbraith, John W. and Tkacz, Greg},
title = {{Nowcasting {GDP}: Electronic Payments, Data Vintages and the Timing of Data Releases}},
year = {2014}
}
@article{Giacomini2010,
author = {Giacomini, Raffaella and Rossi, Barbara},
doi = {10.1002/jae.1177},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/Giacomini\_et\_al-2010-Journal\_of\_Applied\_Econometrics.pdf:pdf},
issn = {0883-7252},
journal = {Journal of Applied Econometrics},
month = jun,
number = {4},
pages = {595--620},
title = {{Forecast comparisons in unstable environments}},
url = {http://doi.wiley.com/10.1002/jae.1177},
volume = {25},
year = {2010}
}
@techreport{Hendry,
abstract = {To forecast an aggregate, we propose adding disaggregate variables, instead of combining forecasts of those disaggregates or forecasting by a univariate aggregate model. New analytical results show the effects of changing coefficients, mis-specification, estimation uncertainty and mis-measurement error. Forecastorigin shifts in parameters affect absolute, but not relative, forecast accuracies; mis-specification and estimation uncertainty induce forecast-error differences, which variable-selection procedures or dimension reductions can mitigate. In Monte Carlo simulations, different stochastic structures and interdependencies between disaggregates imply that including disaggregate information in the aggregate model improves forecast accuracy. Our theoretical predictions and simulations are corroborated when forecasting aggregate US inflation pre- and post 1984 using disaggregate sectoral data. JEL Classification: C51, C53, E31},
author = {Hendry, David F. and Hubrich, Kirstin},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Hendry, Hubrich - Unknown - Combining disaggregate forecasts or combining disaggregate information to forecast an aggregate.pdf:pdf},
institution = {European Central Bank},
keywords = {Aggregate forecasts,Disaggregate information,forecast combination,inflation},
number = {1155},
title = {{Combining disaggregate forecasts or combining disaggregate information to forecast an aggregate}},
type = {Working Paper Series},
url = {http://ideas.repec.org/p/ecb/ecbwps/20101155.html},
year = {2010}
}
@article{Koop2004,
author = {Koop, Gary and Potter, Simon},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/Koop\_et\_al-2004-The\_Econometrics\_Journal.pdf:pdf},
journal = {The Econometrics Journal},
keywords = {bayesian model averaging,composition,diffusion index,markov chain monte carlo,model,reference prior},
pages = {550--565},
title = {{Forecasting in dynamic factor models using Bayesian model averaging}},
url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1368-423X.2004.00143.x/full},
volume = {7},
year = {2004}
}
@article{Koop2010a,
author = {Koop, Gary and Korobilis, Dimitris},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/koop\_korobilis\_forecasting\_inflation\_using\_DMA.pdf:pdf},
journal = {International Economic Review},
keywords = {Bayesian,Phillips curve,forecasting,state space model},
number = {3},
pages = {867--886},
title = {{Forecasting inflation using dynamic model averaging}},
volume = {53},
year = {2012}
}
@techreport{Mol2006,
author = {De Mol, Christine and Giannone, Domenico and Reichlin, Lucrezia},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/De Mol, Giannone, Reichlin - 2006 - Forecasting using a large number of predictors is Bayesian regression a valid alternative to princip.pdf:pdf},
institution = {European Central Bank},
keywords = {Bayesian VAR,ridge regression,Lasso regression},
month = dec,
number = {700},
title = {{Forecasting using a large number of predictors: Is Bayesian regression a valid alternative to principal components?}},
type = {Working Paper Series},
year = {2006}
}
@techreport{No2013,
author = {Ouysse, Rachida},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/No - 2013 - Australian School of Business Working Paper Forecasting using a large number of predictors Bayesian model averaging versus.pdf:pdf},
institution = {Australian School of Business, University of New South Wales},
internal-note = {original author field was parser garbage ("No, Paper"); this appears to be the same paper as entry Ouysse2013 -- consider merging},
title = {{Forecasting using a large number of predictors: Bayesian model averaging versus principal components regression}},
type = {Working Paper},
year = {2013}
}
@article{Ouysse2013,
  author = {Ouysse, Rachida},
  title  = {{Forecasting using a large number of predictors: Bayesian model averaging versus principal components regression}},
  year   = {2013},
  file   = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/ouysse.pdf:pdf}
}
@techreport{Andreou2010,
author = {Andreou, Elena and Ghysels, Eric and Kourtellos, Andros},
institution = {University of Cyprus Department of Economics},
month = nov,
title = {{Forecasting with mixed-frequency data}},
url = {http://econpapers.repec.org/RePEc:ucy:cypeua:10-2010},
year = {2010}
}
@article{Clements2008a,
abstract = {Many macroeconomic series, such as U.S. real output growth, are sampled quarterly, although potentially useful predictors are often observed at a higher frequency. We look at whether a mixed data-frequency sampling (MIDAS) approach can improve forecasts of output growth. The MIDAS specification used in the comparison uses a novel way of including an autoregressive term. We find that the use of monthly data on the current quarter leads to significant improvement in forecasting current and next quarter output growth, and that MIDAS is an effective way to exploit monthly data compared with alternative methods.},
author = {Clements, Michael P. and Galv{\~a}o, Ana Beatriz},
doi = {10.1198/073500108000000015},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/AR-MIDAS/Forecasting Output Growth in the United States.pdf:pdf},
issn = {0735-0015},
journal = {Journal of Business \& Economic Statistics},
number = {4},
pages = {546--554},
title = {{Macroeconomic Forecasting With Mixed-Frequency Data: Forecasting Output Growth in the United States}},
url = {http://dx.doi.org/10.1198/073500108000000015},
volume = {26},
year = {2008}
}
@book{Commandeur2007,
author = {Commandeur, Jacques J. F. and Koopman, Siem Jan},
file = {:E$\backslash$:/Dropbox/phd/statespace/Intro to Time Series Analysis by State Space Methods.pdf:pdf},
isbn = {978-0-19-922887-4},
pages = {189},
publisher = {Oxford University Press},
title = {{An Introduction to State Space Time Series Analysis}},
year = {2007}
}
@article{Semieniuk2014,
author = {Semieniuk, Gregor and van Treeck, Till and Truger, Achim},
file = {:E$\backslash$:/Dropbox/phd/nowcast/Mixed frequency structural VARs/Mixed frequency structural VARs.pdf:pdf},
internal-note = {auto-imported metadata looks wrong: title field was just "Working Paper" and the isbn is dubious; verify title and venue against the linked PDF},
isbn = {4921177783},
number = {October 2011},
title = {{Working Paper}},
year = {2014}
}
@article{Banbura2010a,
abstract = {In this paper we propose a methodology to estimate a dynamic factor model on data sets with an arbitrary pattern of missing data. We modify the Expectation Maximisation (EM) algorithm as proposed for a dynamic factor model by Watson and Engle (1983) to the case with general pattern of missing data. We also extend the model to the case with serially correlated idiosyncratic component. The framework allows to handle efficiently and in an automatic manner sets of indicators characterized by different publication delays, frequencies and sample lengths. This can be relevant e.g. for young economies for which many indicators are compiled only since recently. We also show how to extract a model based news from a statistical data release within our framework and we derive the relationship between the news and the resulting forecast revision. This can be used for interpretation in e.g. nowcasting applications as it allows to determine the sign and size of a news as well as its contribution to the revision, in particular in case of simultaneous data releases. We evaluate the methodology in a Monte Carlo experiment and we apply it to nowcasting and backdating of euro area GDP.},
author = {Bańbura, Marta and Modugno, Michele},
journal = {Journal of Applied Econometrics},
keywords = {Factor Models,Forecasting,Large Cross-Sections},
title = {{Maximum likelihood estimation of factor models on data sets with arbitrary pattern of missing data}},
year = {2010}
}
@article{Castle2013,
  author   = {Castle, Jennifer L. and Clements, Michael P. and Hendry, David F.},
  title    = {{Forecasting by factors, by variables, by both or neither?}},
  journal  = {Journal of Econometrics},
  year     = {2013},
  volume   = {177},
  number   = {2},
  pages    = {305--319},
  month    = dec,
  doi      = {10.1016/j.jeconom.2013.04.015},
  issn     = {03044076},
  url      = {http://www.sciencedirect.com/science/article/pii/S0304407613000924},
  keywords = {Autometrics,C22,C51,Factor models,Forecasting,Impulse-indicator saturation,Model selection},
  abstract = {We consider forecasting with factors, variables and both, modeling in-sample using Autometrics so all principal components and variables can be included jointly, while tackling multiple breaks by impulse-indicator saturation. A forecast-error taxonomy for factor models highlights the impacts of location shifts on forecast-error biases. Forecasting US GDP over 1-, 4- and 8-step horizons using the dataset from Stock and Watson (2009) updated to 2011:2 shows factor models are more useful for nowcasting or short-term forecasting, but their relative performance declines as the forecast horizon increases. Forecasts for GDP levels highlight the need for robust strategies, such as intercept corrections or differencing, when location shifts occur as in the recent financial crisis.},
  file     = {:E$\backslash$:/Dropbox/phd/MIDAS/factors/Forecasting by factors, by variables, by both or neither.pdf:pdf}
}
@article{Kuzin2011,
abstract = {This paper compares the mixed-data sampling (MIDAS) and mixed-frequency VAR (MF-VAR) approaches to model specification in the presence of mixed-frequency data, e.g. monthly and quarterly series. MIDAS leads to parsimonious models which are based on exponential lag polynomials for the coefficients, whereas MF-VAR does not restrict the dynamics and can therefore suffer from the curse of dimensionality. However, if the restrictions imposed by MIDAS are too stringent, the MF-VAR can perform better. Hence, it is difficult to rank MIDAS and MF-VAR a priori, and their relative rankings are better evaluated empirically. In this paper, we compare their performances in a case which is relevant for policy making, namely nowcasting and forecasting quarterly GDP growth in the euro area on a monthly basis, using a set of about 20 monthly indicators. It turns out that the two approaches are more complements than substitutes, since MIDAS tends to perform better for horizons up to four to five months, whereas MF-VAR performs better for longer horizons, up to nine months. © 2010 International Institute of Forecasters.},
author = {Kuzin, Vladimir and Marcellino, Massimiliano and Schumacher, Christian},
doi = {10.1016/j.ijforecast.2010.02.006},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/VAR/MIDAS versus mixed-frequency VAR.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {MIDAS,Mixed-frequency VAR,Mixed-frequency data,Nowcasting},
number = {2},
pages = {529--542},
title = {{MIDAS vs. mixed-frequency VAR: Nowcasting GDP in the euro area}},
volume = {27},
year = {2011}
}
@article{Altissimo2010,
  author    = {Altissimo, Filippo and Cristadoro, Riccardo and Forni, Mario and Lippi, Marco and Veronese, Giovanni},
  title     = {{New Eurocoin: Tracking Economic Growth in Real Time}},
  journal   = {The Review of Economics and Statistics},
  year      = {2010},
  volume    = {92},
  number    = {4},
  pages     = {1024--1034},
  publisher = {MIT Press},
  url       = {http://ideas.repec.org/a/tpr/restat/v92y2010i4p1024-1034.html},
  abstract  = {Removal of short-run dynamics from a stationary time series to isolate the medium- to long-run component can be obtained by a bandpass filter. However, bandpass filters are infinite moving averages and can therefore deteriorate at the end of the sample. This is a well-known result in the literature isolating the business cycle in integrated series. We show that the same problem arises with our application to stationary time series. In this paper, we develop a method to obtain smoothing of a stationary time series by using only contemporaneous values of a large data set, so that no end-of-sample deterioration occurs. Our method is applied to the construction of New Eurocoin, an indicator of economic activity for the euro area, which is an estimate, in real time, of the medium- to long-run component of GDP growth. As our data set is monthly and most of the series are updated with a short delay, we are able to produce a monthly real-time indicator. As an estimate of the medium- to long-run GDP growth, Eurocoin performs better than the bandpass filter at the end of the sample in terms of both fitting and turning-point signaling. (c) 2010 The President and Fellows of Harvard College and the Massachusetts Institute of Technology.},
  file      = {:E$\backslash$:/Dropbox/phd/MIDAS/UMIDAS/New Eurocoin Tracking Economic Growth in Real Time.pdf:pdf}
}
@article{Ba2011,
author = {Bańbura, Marta and Giannone, Domenico},
file = {:E$\backslash$:/Dropbox/phd/nowcast/Nowcasting with Daily Data.pdf:pdf},
title = {{Nowcasting with daily data}},
year = {2011}
}
@techreport{Boivin2005,
abstract = {The paper assesses the extent to which forecasts are influenced by (i) how the factors are estimated, and (ii) how the forecasts are formulated. For simple data generating processes, and when the dynamic structure of the data is known, no one method is systematically good or bad. However, for the complex error and dynamic structures encountered in practice, the best method is to forecast the series of interest directly, rather than the common component and idiosyncratic components separately. By imposing fewer constraints, the method appears to be less vulnerable to misspecification.},
author = {Boivin, Jean and Ng, Serena},
institution = {National Bureau of Economic Research},
internal-note = {also published in International Journal of Central Banking 1(3), 117--151},
keywords = {FAVAR,Nowcasting},
number = {11285},
title = {{Understanding and comparing factor-based forecasts}},
type = {{NBER} Working Paper},
url = {http://www.nber.org/papers/w11285},
year = {2005}
}
@article{Eraker2008,
abstract = {Economic data can be collected at a variety of frequencies. Typically, estimation is done using the frequency of the coarsest data. This paper discusses how to combine data of different frequencies in estimating Vector Autoregressions (VAR’s). The method is based on Bayesian Gibbs sampling using a missing data formulation for coarsely observed data. Our approach has the primary advantage that it increases the accuracy of parameter estimates relative to estimates obtained from coarse data. We demonstrate this through an example where we estimate a model for economic growth based on quarterly observations of GDP, monthly industrial production, and yield curve data. Estimates of the posterior standard deviations are uniformly lower for the BMF estimator. Experiments with artificially simulated data further docu- ments the efficiency gains.},
author = {Eraker, Bj{\o}rn and Chiu, Ching Wai (Jeremy) and Foerster, Andrew and Kim, Tae Bong},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bayesian Mixed Frequency VAR’s.pdf:pdf},
pages = {1--21},
title = {{Bayesian Mixed Frequency {VAR}'s}},
year = {2008}
}
@article{Foroni2014a,
abstract = {In this paper, we focus on the different methods which have been proposed in the literature to date for dealing with mixed-frequency and ragged-edge datasets: bridge equations, mixed-data sampling (MIDAS), and mixed-frequency VAR (MF-VAR) models. We discuss their performances for nowcasting the quarterly growth rate of the Euro area GDP and its components, using a very large set of monthly indicators. We investigate the behaviors of single indicator models, forecast combinations and factor models, in a pseudo real-time framework. MIDAS with an AR component performs quite well, and outperforms MF-VAR at most horizons. Bridge equations perform well overall. Forecast pooling is superior to most of the single indicator models overall. Pooling information using factor models gives even better results. The best results are obtained for the components for which more economically related monthly indicators are available. Nowcasts of GDP components can then be combined to obtain nowcasts for the total GDP growth. © 2013 International Institute of Forecasters.},
author = {Foroni, Claudia and Marcellino, Massimiliano},
doi = {10.1016/j.ijforecast.2013.01.010},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Foroni, Marcellino - 2014 - A comparison of mixed frequency approaches for nowcasting Euro area macroeconomic aggregates.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Bridge models,Factor models,MIDAS,Mixed-frequency VAR,Mixed-frequency data,Nowcasting},
number = {3},
pages = {554--568},
title = {{A comparison of mixed frequency approaches for nowcasting Euro area macroeconomic aggregates}},
volume = {30},
year = {2014}
}
@article{Ghysels2012,
  author   = {Ghysels, Eric},
  title    = {{Macroeconomics and the Reality of Mixed Frequency Data}},
  journal  = {University of North Carolina Working Paper},
  year     = {2012},
  number   = {July 2011},
  pages    = {1--52},
  abstract = {Many time series are sampled at different frequencies. When we study co-movements between such series we usually analyze the joint process sampled at a common low frequency. This has consequences in terms of potentially mis-specifying the co- movements and hence the analysis of impulse response functions - a commonly used tool for economic policy analysis. We introduce a class of mixed frequency VAR models that allows us to measure the impact of high frequency data on low frequency and vice versa. Our approach does not rely on latent processes/shocks representations. As a consequence, the mixed frequency VAR is an alternative to commonly used state space models for mixed frequency data. State space models are parameter-driven whereas mixed frequency VAR models are observation-driven models as they are formulated exclusively in terms of observable data and do not involve latent processes as well as shocks and thus avoid the need to formulate measurement equations, filtering etc. We also propose various parsimonious parameterizations, in part inspired by recent work on MIDAS regressions. We also explicitly characterize the mis-specification of a traditional common low frequency VAR and its implied mis-specified impulse response functions. The class of mixed frequency VAR models can also characterize the timing of information releases for a mixture of sampling frequencies and the real-time updating of predictions caused by the flow of high frequency information. Various estimation procedures for mixed frequency VAR models are also proposed, both classical and Bayesian. Numerical and empirical examples quantify the consequences of ignoring mixed frequency data.}
}
@article{Ghysels2012a,
  author = {Ghysels, Eric},
  title  = {{Mixed Frequency Vector Autoregressive Models}},
  year   = {2012},
  number = {July 2011},
  file   = {:E$\backslash$:/Dropbox/phd/MIDAS/VAR/Mixed Frequency Vector Autoregressive Models.pdf:pdf}
}
@article{Hansen2013,
author = {Hansen, Bruce E.},
keywords = {cross-validation,factor models,forecast combination,generated regressors,Mallows},
pages = {1--37},
title = {{Forecasting with Factor-Augmented Regression: A Frequentist Model Averaging Approach}},
year = {2013}
}
@article{Hyndman2002,
abstract = {We provide a new approach to automatic forecasting based on an extended range of exponential smoothing methods. Each method in our taxonomy of exponential smoothing methods provides forecasts that are equivalent to forecasts from a state space model. This equivalence allows: (1) easy calculation of the likelihood, the AIC and other model selection criteria; (2) computation of prediction intervals for each method; and (3) random simulation from the underlying state space model. We demonstrate the methods by applying them to the data from the M-competition and the M3-competition. The method provides forecast accuracy comparable to the best methods in the competitions; it is particularly good for short forecast horizons with seasonal data. © 2002 International Institute of Forecasters. Published by Elsevier Science B.V. All rights reserved.},
author = {Hyndman, Rob J. and Koehler, Anne B. and Snyder, Ralph D. and Grose, Simone},
doi = {10.1016/S0169-2070(01)00110-8},
isbn = {0169-2070},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Automatic forecasting,Exponential smoothing,Prediction intervals,State space models},
number = {3},
pages = {439--454},
title = {{A state space framework for automatic forecasting using exponential smoothing methods}},
volume = {18},
year = {2002}
}
@techreport{Scott2013,
abstract = {We consider the problem of short-term time series forecasting (nowcasting) when there are more possible predictors than observations. Our approach combines three Bayesian techniques: Kalman filtering, spike-and-slab regression, and model averaging. We illustrate this approach using search engine query data as predictors for consumer sentiment and gun sales.},
author = {Scott, Steven L. and Varian, Hal R.},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Scott, Varian - 2013 - NBER WORKING PAPER SERIES BAYESIAN VARIABLE SELECTION FOR NOWCASTING ECONOMIC TIME SERIES Bayesian Variable Selec.pdf:pdf},
institution = {National Bureau of Economic Research},
internal-note = {same working paper as entry Scott2014; consider merging},
number = {19567},
title = {{Bayesian Variable Selection for Nowcasting Economic Time Series}},
type = {{NBER} Working Paper},
url = {http://www.nber.org/papers/w19567},
year = {2013}
}
@article{Sells1993,
abstract = {A dynamic programme to optimise the farmer's long-term weed management problem is described in general terms incorporating decisions of crop, autumn cultivations, timing of planting (winter and spring crops) and herbicide use. This paper concentrates on the formulation of one-step transition probabilities given the uncertain nature of herbicide performance. For a model considering one weed in a cereal rotation examples of the one-step transition probability matrices are given for wild oat control with and without a herbicide application.},
author = {Sells, J. E.},
doi = {10.1016/0308-521X(93)90080-L},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Sells - 1993 - Calculating transition probabilities for modelling weed management options using stochastic dynamic programming.pdf:pdf},
issn = {0308521X},
journal = {Agricultural Systems},
month = jan,
number = {1},
pages = {41--52},
title = {{Calculating transition probabilities for modelling weed management options using stochastic dynamic programming}},
url = {http://www.sciencedirect.com/science/article/pii/0308521X9390080L},
volume = {41},
year = {1993}
}
@article{Tkacz2001,
abstract = {The objective of this paper is to improve the accuracy of financial and monetary forecasts of Canadian output growth by using leading indicator neural network models. We find that neural networks yield statistically lower forecast errors for the year-over-year growth rate of real GDP relative to linear and univariate models. However, such forecast improvements are less notable when forecasting quarterly real GDP growth. Neural networks are unable to outperform a naive no-change model. More pronounced non-linearities at the longer horizon is consistent with the possible asymmetric effects of monetary policy on the real economy. © International Institute of Forecasters.},
author = {Tkacz, Greg},
doi = {10.1016/S0169-2070(00)00063-7},
file = {:E$\backslash$:/Dropbox/phd/forecast/Canada/Neural network forecasting of Canadian GDP growth.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Artificial neural networks,Backpropagation,Business cycle indicators,Comparative methods,Economic policy,Macroeconomic forecasting,Macroeconomic indicators},
number = {1},
pages = {57--69},
title = {{Neural network forecasting of Canadian GDP growth}},
volume = {17},
year = {2001}
}
@article{Zeng2014,
author = {Zeng, Jing},
file = {:E$\backslash$:/Dropbox/phd/shrinkage/WP\_20\_JingZeng\_2014.pdf:pdf},
title = {{Forecasting Aggregates with Disaggregate Variables: Does Boosting Help to Select the Most Relevant Predictors?}},
year = {2014}
}
@article{Scott2014,
  author   = {Scott, Steven L and Varian, Hal R},
  title    = {{Bayesian Variable Selection for Nowcasting Economic Time Series}},
  journal  = {Economics of Digitization},
  year     = {2014},
  number   = {July 2012},
  pages    = {1--22},
  url      = {http://www.nber.org/papers/w19567.pdf},
  abstract = {We consider the problem of short-term time series forecasting (nowcasting) when there are more possible predictors than observations. The motivating example is the use of Google Trends search engine query data as a contemporaneous predictor of economic indicators. Our preferred approach combines three Bayesian techniques: Kalman filtering, spike-and-slab regression, and model averaging. The Kalman filter can be used to control for time series feature, such as seasonality and trend; the regression can be used to incorporate predictors such as search engine queries; and model averaging can be used to reduce the danger of overfitting. Overall the Bayesian approach allows a flexible way to incorporate prior knowledge, both subjective and objective, into the estimation procedure. We illustrate this approach using search engine query data as predictors for consumer sentiment and gun sales.},
  file     = {:E$\backslash$:/Dropbox/phd/bsts/fat.pdf:pdf}
}
@article{Stock2009,
abstract = {An ongoing theme in David Hendry’s work has been concern about detecting and avoiding forecast breakdowns that arise because of structural instability. Parameter instability can arise for various reasons, including structural breaks in the economy (for example, changes in technology), policy regime shifts, or changes in the survey instruments from which the time series are constructed. Hendry and coauthors have argued that such instability, whatever its source, often manifests itself as breaks in time series forecasting relations, and moreover that such breaks constitute one of the primary reasons for forecast failures in practice (see for example Clements and Hendry [1999, 2002], Hendry and Clements [2002], Hendry [2005], and Hendry and Mizon [2005]). One line of Hendry’s research has been to develop and to analyze non-structural forecasting methods for their potential robustness to parameter instability, including error correction models, overdifferencing, intercept shift methods, and – closest to the focus of this paper – forecast pooling (Hendry and Clements [2002]). This paper continues this line of inquiry, in which forecasting methods are examined for their reliability in the face of structural breaks. We focus here on forecasts constructed using dynamic factor models (DFMs; Geweke [1977], Sargent and Sims [1977]). In DFMs, the comovements of the observable time series are characterized by latent dynamic factors. Over the past decade, work on DFMs has focused on high- dimensional systems in which very many series depend on a handful of factors (Forni, Lippi, Hallin, and Reichlin [2000], Stock and Watson [2002a, 2002b], and many others; for a survey, see Stock and Watson [2006]). These factor-based forecasts have had notable empirical forecasting successes. 
Yet, there has been little work to date on the performance of factor-based macroeconomic forecasts under structural instability (exceptions are Stock and Watson (1998, 2002b) and Banerjee, Marcellino, and Masten (2007), which are discussed below).},
author = {Stock, J.H. and Watson, M.W.},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/hendryfestschrift\_stockwatson\_April282008-instability.pdf:pdf},
journal = {The Methodology and Practice of Econometrics. A Festschrift in Honour of David F. Hendry},
number = {August 2007},
pages = {173--205},
title = {{Forecasting in Dynamic Factor Models Subject To Structural Instability}},
url = {http://books.google.com/books?hl=en\&lr=\&id=-w66-4C0JDcC\&oi=fnd\&pg=PA173\&dq=FORECASTING+IN+DYNAMIC+FACTOR+MODELS+SUBJECT+TO+STRUCTURAL+INSTABILITY\&ots=bcvHlMU9bN\&sig=Y-SvteVpCVvtb5QV-sAU2-dS-2o},
year = {2009}
}
@article{Wright2009,
author = {Wright, Jonathan H.},
doi = {10.1002/for.1088},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/Wright-2009-Journal\_of\_Forecasting.pdf:pdf},
issn = {02776693},
journal = {Journal of Forecasting},
month = mar,
number = {2},
pages = {131--144},
title = {{Forecasting US inflation by Bayesian model averaging}},
url = {http://doi.wiley.com/10.1002/for.1088},
volume = {28},
year = {2009}
}
@article{Anonymous2006percapita,
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Unknown - 2006 - Per Capita.pdf:pdf},
journal = {EnANPAD},
number = {3},
pages = {1--16},
title = {{Per Capita}},
year = {2006}
}
@article{Bai2002,
abstract = {In this paper we develop some econometric theory for factor models$\backslash$nof large dimensions. The focus is the determination of the number$\backslash$nof factors (r), which is an unresolved issue in the rapidly growing$\backslash$nliterature on multifactor models. We first establish the convergence$\backslash$nrate for the factor estimates that will allow for consistent estimation$\backslash$nof r. We then propose some panel criteria and show that the number$\backslash$nof factors can be consistently estimated using the criteria. The$\backslash$ntheory is developed under the framework of large cross-sections (N)$\backslash$nand large time dimensions (T). No restriction is imposed on the relation$\backslash$nbetween N and T. Simulations show that the proposed criteria have$\backslash$ngood finite sample properties in many configurations of the panel$\backslash$ndata encountered in practice.},
author = {Bai, Jushan and Ng, Serena},
doi = {10.1111/1468-0262.00273},
isbn = {0012-9682},
issn = {0012-9682},
journal = {Econometrica},
keywords = {asset pricing,factor analysis,model selection,principal components},
number = {1},
pages = {191--221},
title = {{Determining the Number of Factors in Approximate Factor Models}},
volume = {70},
year = {2002}
}
@article{BANBURA2014,
author = {Bańbura, Marta and Modugno, Michele},
doi = {10.1002/jae.2306},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/BAŃBURA, MODUGNO - 2014 - MAXIMUM LIKELIHOOD ESTIMATION OF FACTOR MODELS ON DATASETS WITH ARBITRARY PATTERN OF MISSING DATA.pdf:pdf},
issn = {01451707},
journal = {Journal of Applied Econometrics},
number = {4},
pages = {133--160},
title = {{Maximum Likelihood Estimation of Factor Models on Datasets with Arbitrary Pattern of Missing Data}},
volume = {29},
year = {2014}
}
@article{Bilmes1998,
abstract = {We describe the maximum-likelihood parameter estimation problem and how the Expectation-Maximization (EM) algorithm can be used for its solution. We first describe the abstract form of the EM algorithm as it is often given in the literature. We then develop the EM parameter estimation procedure for two applications: 1) finding the parameters of a mixture of Gaussian densities, and 2) finding the parameters of a hidden Markov model (HMM) (i.e., the Baum-Welch algorithm) for both discrete and Gaussian mixture observation models. We derive the update equations in fairly explicit detail but we do not prove any convergence properties. We try to emphasize intuition rather than mathematical rigor.},
author = {Bilmes, Jeff A.},
note = {CiteSeerX: 10.1.1.119.4856},
file = {:E$\backslash$:/Dropbox/phd/statespace/em.pdf:pdf},
isbn = {0226775429},
journal = {International Computer Science Institute},
number = {510},
pages = {126},
title = {{A gentle tutorial of the EM algorithm and its application to parameter estimation for Gaussian mixture and hidden Markov models}},
volume = {4},
year = {1998}
}
@article{Brauning2014,
abstract = {We explore a new approach to the forecasting of macroeconomic variables based on a dynamic factor state space analysis. Key economic variables are modeled jointly with principal components from a large time series panel of macroeconomic indicators using a multivariate unobserved components time series model. When the key economic variables are observed at a low frequency and the panel of macroeconomic variables is at a high frequency, we can use our approach for both nowcasting and forecasting purposes. Given a dynamic factor model as the data generation process, we provide Monte Carlo evidence of the finite-sample justification of our parsimonious and feasible approach. We also provide empirical evidence for a US macroeconomic dataset. The unbalanced panel contains quarterly and monthly variables. The forecasting accuracy is measured against a set of benchmark models. We conclude that our dynamic factor state space analysis can lead to higher levels of forecasting precision when the panel size and time series dimensions are moderate.},
author = {Br\"{a}uning, Falk and Koopman, Siem Jan},
doi = {10.1016/j.ijforecast.2013.03.004},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Br\"{a}uning, Koopman - 2014 - Forecasting macroeconomic variables using collapsed dynamic factor analysis.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Kalman filter,Maximum likelihood method,Principal components,State space dynamic factor model},
month = jul,
number = {3},
pages = {572--584},
title = {{Forecasting macroeconomic variables using collapsed dynamic factor analysis}},
url = {http://www.sciencedirect.com/science/article/pii/S0169207013000459},
volume = {30},
year = {2014}
}
@article{An2011,
author = {Chen, Yu-chin and Tsay, Wen-Jen},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/ADL-ssrn.pdf:pdf},
keywords = {2011,and jonathan wright for,autoregressive distributed lag,c22,c53,cations,charles nelson,comments and discussions,commodity prices,department of,f31,f47,february,first draft,forecasting,jel classi,mixed frequency data,q02,the research is conducted,useful,visiting scholar in the,we thank eric ghysels,when tsay is a},
title = {{Forecasting Commodity Prices with Mixed-Frequency Data: An OLS Based Generalized ADL Approach}},
url = {http://www.econ.sinica.edu.tw/upload/file/(20111020)(1).pdf},
year = {2011}
}
@article{DeMol2008a,
abstract = {This paper considers Bayesian regression with normal and double-exponential priors as forecasting methods based on large panels of time series. We show that, empirically, these forecasts are highly correlated with principal component forecasts and that they perform equally well for a wide range of prior choices. Moreover, we study conditions for consistency of the forecast based on Bayesian regression as the cross-section and the sample size become large. This analysis serves as a guide to establish a criterion for setting the amount of shrinkage in a large cross-section. ?? 2008 Elsevier B.V. All rights reserved.},
author = {{De Mol}, Christine and Giannone, Domenico and Reichlin, Lucrezia},
doi = {10.1016/j.jeconom.2008.08.011},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/De Mol, Giannone, Reichlin - 2008 - Forecasting using a large number of predictors Is Bayesian shrinkage a valid alternative to princ(2).pdf:pdf},
issn = {03044076},
journal = {Journal of Econometrics},
keywords = {Bayesian VAR,Bayesian shrinkage,Large cross-sections,Lasso regression,Principal components,Ridge regression},
title = {{Forecasting using a large number of predictors: Is Bayesian shrinkage a valid alternative to principal components?}},
year = {2008}
}
@article{Foroni2013,
abstract = {The development of models for variables sampled at di¤erent frequencies has attracted substantial interest in the recent econometric literature. In this paper we provide an overview of the most common techniques, including bridge equa- tions, MIxed DAta Sampling (MIDAS) models, mixed frequency VARs, and mixed frequency factor models. We also consider alternative techniques for handling the ragged edge of the data, due to asynchronous publication. Finally, we survey the main empirical applications based on alternative mixed frequency models. J.E.L.},
author = {Foroni, Claudia and Marcellino, Massimiliano},
doi = {10.2139/ssrn.2268912},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/A survey of econometric methods for mixed-frequency data.pdf:pdf},
isbn = {9788275537230},
issn = {1556-5068},
keywords = {Claudia Foroni,Massimiliano Marcellino},
title = {{A Survey of Econometric Methods for Mixed-Frequency Data}},
year = {2013}
}
@incollection{Koopman2012,
author = {Koopman, Siem Jan and Ooms, Marius},
booktitle = {The Oxford Handbook of Economic Forecasting},
doi = {10.1093/oxfordhb/9780195398649.013.0006},
file = {:E$\backslash$:/Dropbox/phd/statespace/SiemJanKoopman-final-2010UCForecasting.pdf:pdf},
isbn = {9780199940325},
publisher = {Oxford University Press},
title = {{Forecasting Economic Time Series Using Unobserved Components Time Series Models}},
url = {http://oxfordhandbooks.com/view/10.1093/oxfordhb/9780195398649.001.0001/oxfordhb-9780195398649-e-6},
year = {2012}
}
@article{Koopman2013,
abstract = {We extend the class of dynamic factor yield curve models in order to include macroeconomic factors. Our work benefits from recent developments in the dynamic factor literature related to the extraction of the common factors from a large panel of macroeconomic series and the estimation of the parameters in the model. We include these factors in a dynamic factor model for the yield curve, in which we model the salient structure of the yield curve by imposing smoothness restrictions on the yield factor loadings via cubic spline functions. We carry out a likelihood-based analysis in which we jointly consider a factor model for the yield curve, a factor model for the macroeconomic series, and their dynamic interactions with the latent dynamic factors. We illustrate the methodology by forecasting the U.S. term structure of interest rates. For this empirical study, we use a monthly time series panel of unsmoothed Fama–Bliss zero yields for treasuries of different maturities between 1970 and 2009, which we combine with a macro panel of 110 series over the same sample period. We show that the relationship between the macroeconomic factors and the yield curve data has an intuitive interpretation, and that there is interdependence between the yield and macroeconomic factors. Finally, we perform an extensive out-of-sample forecasting study. Our main conclusion is that macroeconomic variables can lead to more accurate yield curve forecasts.},
author = {Koopman, Siem Jan and van der Wel, Michel},
doi = {10.1016/j.ijforecast.2012.12.004},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Koopman, van der Wel - 2013 - Forecasting the US term structure of interest rates using a macroeconomic smooth dynamic factor model.pdf:pdf},
issn = {01692070},
journal = {International Journal of Forecasting},
keywords = {Fama–Bliss data set,Kalman filter,Maximum likelihood,Yield curve},
month = oct,
number = {4},
pages = {676--694},
title = {{Forecasting the US term structure of interest rates using a macroeconomic smooth dynamic factor model}},
url = {http://www.sciencedirect.com/science/article/pii/S0169207013000058},
volume = {29},
year = {2013}
}
@article{Stock2012b,
abstract = {This paper provides a simple shrinkage representation that describes the operational characteristics of various forecasting methods designed for a large number of orthogonal predictors (such as principal components). These methods include pretest methods, Bayesian model averaging, empirical Bayes, and bagging. We compare empirically forecasts from these methods to dynamic factor model (DFM) forecasts using a U.S. macroeconomic data set with 143 quarterly variables spanning 1960-2008. For most series, including measures of real economic activity, the shrinkage forecasts are inferior to the DFM forecasts},
author = {Stock, James H. and Watson, Mark W.},
doi = {10.1080/07350015.2012.715956},
file = {:E$\backslash$:/Dropbox/phd/statespace/dfm/dfmdata/stock\_watson\_generalized\_shrinkage\_supplement\_June\_2012.pdf:pdf},
isbn = {0735-0015$\backslash$r1537-2707},
issn = {0735-0015},
journal = {Journal of Business and Economic Statistics},
keywords = {dynamic factor models,empirical Bayes,high dimensional model},
number = {4},
pages = {481--493},
title = {{Generalized shrinkage methods for forecasting using many predictors}},
url = {http://amstat.tandfonline.com/doi/abs/10.1080/07350015.2012.715956},
volume = {30},
year = {2012}
}
@article{Tversky1992,
abstract = {We develop a new version of prospect theory that employs cumulative rather than separable decision weights and extends the theory in several respects. This version, called cumulative prospect theory, applies to uncertain as well as to risky prospects with any number of outcomes, and it allows different weighting functions for gains and for losses. Two principles, diminishing sensitivity and loss aversion, are invoked to explain the characteristic curvature of the value function and the weighting functions. A review of the experimental evidence and the results of a new experiment confirm a distinctive fourfold pattern of risk attitudes: risk aversion for gains and risk seeking for losses of high probability; risk seeking for gains and risk aversion for losses of low probability.},
author = {Tversky, Amos and Kahneman, Daniel},
journal = {Journal of Risk and Uncertainty},
doi = {10.1007/BF00122574},
isbn = {0521627494},
issn = {0895-5646},
number = {4},
pages = {297--323},
pmid = {15795132},
title = {{Advances in prospect theory: Cumulative representation of uncertainty}},
volume = {5},
year = {1992}
}
@article{Bai2013,
author = {Bai, Jennie and Ghysels, Eric and Wright, Jonathan H.},
doi = {10.1080/07474938.2012.690675},
file = {:E$\backslash$:/Dropbox/phd/statespace/State Space Models and MIDAS Regressions.pdf:pdf},
issn = {0747-4938},
journal = {Econometric Reviews},
number = {7},
pages = {779--813},
title = {{State Space Models and MIDAS Regressions}},
url = {http://www.tandfonline.com/doi/abs/10.1080/07474938.2012.690675},
volume = {32},
year = {2013}
}
@article{Buchen2011a,
abstract = {This paper evaluates the forecast performance of boosting in comparison to the forecast combination schemes and dynamic factor models presented in Stock and Watson (2006). We find that boosting is a serious competitor for forecasting US industrial production.},
author = {Buchen, Teresa and Wohlrabe, Klaus},
doi = {10.1016/j.econlet.2011.05.040},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Buchen, Wohlrabe - 2011 - Forecasting with many predictors Is boosting a viable alternative.pdf:pdf},
issn = {01651765},
journal = {Economics Letters},
keywords = {Boosting,Forecasting,Large datasets},
month = oct,
number = {1},
pages = {16--18},
title = {{Forecasting with many predictors: Is boosting a viable alternative?}},
url = {http://www.sciencedirect.com/science/article/pii/S0165176511002175},
volume = {113},
year = {2011}
}
@article{Castle2015,
abstract = {a b s t r a c t We investigate alternative robust approaches to forecasting, using a new class of robust devices, contrasted with equilibrium-correction models. Their forecasting properties are derived facing a range of likely empirical problems at the forecast origin, including mea-surement errors, impulses, omitted variables, unanticipated location shifts and incorrectly included variables that experience a shift. We derive the resulting forecast biases and error variances, and indicate when the methods are likely to perform well. The robust methods are applied to forecasting US GDP using autoregressive models, and also to autoregressive models with factors extracted from a large dataset of macroeconomic variables. We con-sider forecasting performance over the Great Recession, and over an earlier more quiescent period.},
author = {Castle, Jennifer L and Clements, Michael P and Hendry, David F},
doi = {10.1016/j.ijforecast.2014.11.002},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Castle, Clements, Hendry - 2015 - Robust approaches to forecasting.pdf:pdf},
journal = {International Journal of Forecasting},
keywords = {Factor models GDP forecasts,Forecast biases,Location shifts,Smoothed forecasting devices},
pages = {99--112},
title = {{Robust approaches to forecasting}},
volume = {31},
year = {2015}
}
@article{Clements2009,
author = {Clements, Michael P. and Galv\~{a}o, Ana Beatriz},
doi = {10.1002/jae.1075},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/AR-MIDAS/Clements\_et\_al-2009-Journal\_of\_Applied\_Econometrics.pdf:pdf},
issn = {08837252},
journal = {Journal of Applied Econometrics},
month = nov,
number = {7},
pages = {1187--1206},
title = {{Forecasting US output growth using leading indicators: an appraisal using MIDAS models}},
url = {http://doi.wiley.com/10.1002/jae.1075},
volume = {24},
year = {2009}
}
@article{Iteration,
author = {Iteration, Iteration},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Iteration - Unknown - The above results show that using both the mfx and prchange commands , we can get the same result of the marginal.pdf:pdf},
title = {{The above results show that using both the mfx and prchange commands , we can get the same result of the marginal effect of age on the Pr ( ins = 1 | x ).}}
}
@article{Jalles2009,
author = {Jalles, Jt},
doi = {10.2139/ssrn.1496864},
file = {:E$\backslash$:/Dropbox/phd/statespace/Structural Time Series Models and the Kalman Filter.pdf:pdf},
issn = {1556-5068},
journal = {FEUNL Work Pap},
keywords = {cointegration,likelihood,smoothing,sutse},
pages = {1--30},
title = {{Structural time series models and the Kalman Filter: a concise review}},
url = {http://fesrvsd.fe.unl.pt/WPFEUNL/WP2009/wp541.pdf},
year = {2009}
}
@article{Krugman1991,
abstract = {This paper develops a simple model that shows how a country can endogenously become differentiated into an industrialized "core" and an agricultural "periphery." In order to realize scale economies while minimizing transport costs, manufacturing firms tend to locate in the region with larger demand, but the location of demand itself depends on the distribution of manufacturing. Emergence of a core-periphery pattern depends on transportations costs, economies of scale, and the share of manufacturing in national income.},
author = {Krugman, Paul},
journal = {Journal of Political Economy},
doi = {10.1086/261763},
isbn = {00223808},
issn = {0022-3808},
number = {3},
pages = {483},
pmid = {1416},
title = {{Increasing Returns and Economic Geography}},
volume = {99},
year = {1991}
}
@article{Radchenko2006,
abstract = {Using Markov Chain Monte Carlo algorithms within the limited information Bayesian framework, we estimate the parameters of the structural equation of interest and test weak exogeneity in a simultaneous equation model with white noise as well as autocorrelated error terms. A numerical example and an estimation of the supply and demand equations of the U.S. gasoline market show that if we ignore autocorrelation we obtain unreasonable posterior distributions of the parameters of interest. Also we find that the hypothesis of the asymmetric effect of the changes in oil price on the changes in gasoline price is rejected. Oil inventory has a significant negative effect on the gasoline price.},
author = {Radchenko, Stanislav and Tsurumi, Hiroki},
doi = {10.1016/j.jeconom.2005.03.008},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Radchenko, Tsurumi - 2006 - Limited information Bayesian analysis of a simultaneous equation with an autocorrelated error term and its a.pdf:pdf},
issn = {03044076},
journal = {Journal of Econometrics},
month = jul,
number = {1},
pages = {31--49},
title = {{Limited information Bayesian analysis of a simultaneous equation with an autocorrelated error term and its application to the U.S. gasoline market}},
url = {http://www.sciencedirect.com/science/article/pii/S0304407605000825},
volume = {133},
year = {2006}
}
@article{Schorfheide2012,
abstract = {This paper develops a vector autoregression (VAR) for macroeconomic time series which are observed at mixed frequencies – quarterly and monthly. The mixed-frequency VAR is cast in state-space form and estimated with Bayesian methods under a Minnesota-style prior. Using a real-time data set, we generate and evaluate forecasts from the mixed-frequency VAR and compare them to forecasts from a VAR that is estimated based on data time-aggregated to quarterly frequency. We document how information that becomes available within the quarter improves the forecasts in real time.},
author = {Schorfheide, Frank and Song, Dongho},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Schorfheide, Song - 2012 - Real-time forecasting with a mixed-frequency VAR.pdf:pdf},
journal = {Working Papers},
keywords = {Bayesian statistical decision theory,Forecasting,Vector autoregression},
publisher = {Federal Reserve Bank of Minneapolis},
title = {{Real-time forecasting with a mixed-frequency VAR}},
url = {http://ideas.repec.org/p/fip/fedmwp/701.html},
year = {2012}
}
@article{Clements2014,
author = {Clements, Michael P},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/factors/Real-Time Factor Model Forecasting-2014-05-Clements.pdf:pdf},
number = {May},
title = {{Real-Time Factor Model Forecasting and the Effects of Instability}},
year = {2014}
}
@article{Lampos2012,
abstract = {We present a general methodology for inferring the occurrence and magnitude of an event or phenomenon by exploring the rich amount of unstructured textual information on the social part of the web. Having geo-tagged user posts on the microblogging service of Twitter as our input data, we investigate two case studies. The first consists of a benchmark problem, where actual levels of rainfall in a given location and time are inferred from the content of tweets. The second one is a real-life task, where we infer regional Influenza-like Illness rates in the effort of detecting timely an emerging epidemic disease. Our analysis builds on a statistical learning framework, which performs sparse learning via the bootstrapped version of LASSO to select a consistent subset of textual features from a large amount of candidates. In both case studies, selected features indicate close semantic correlation with the target topics and inference, conducted by regression, has a significant performance, especially given the short length — approximately one year — of Twitter's data time series.},
author = {Lampos, Vasileios and Cristianini, Nello},
doi = {10.1145/2337542.2337557},
file = {:E$\backslash$:/Dropbox/phd/forecast/Nowcasting with statistical learning.pdf:pdf},
issn = {21576904},
journal = {ACM Transactions on Intelligent Systems and Technology},
number = {4},
pages = {1--22},
title = {{Nowcasting Events from the Social Web with Statistical Learning}},
volume = {3},
year = {2012}
}
@article{Stocka,
author = {Stock, James H},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Stock - Unknown - Forecasting and Now-Casting with Disparate Predictors Dynamic Factor Models and Beyond FEMES 2006 Meetings Beijing.pdf:pdf},
title = {{Forecasting and Now-Casting with Disparate Predictors: Dynamic Factor Models and Beyond FEMES 2006 Meetings Beijing}}
}
@book{EssaysForecastingBMA,
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/Forecasting and Bayesian Model Average.pdf:pdf},
isbn = {9172587105},
title = {{Essays on Forecasting and Bayesian Model Averaging}}
}
@article{Andreou2013b,
author = {Andreou, Elena and Ghysels, Eric and Kourtellos, Andros},
doi = {10.1080/07350015.2013.767199},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Andreou, Ghysels, Kourtellos - 2013 - Should Macroeconomic Forecasters Use Daily Financial Data and How(2).pdf:pdf},
isbn = {0735-0015$\backslash$r1537-2707},
issn = {0735-0015},
journal = {Journal of Business \& Economic Statistics},
keywords = {daily financial factors,financial markets and the,macroeconomy,midas regressions},
number = {2},
pages = {240--251},
title = {{Should Macroeconomic Forecasters Use Daily Financial Data and How?}},
url = {http://www.tandfonline.com/doi/abs/10.1080/07350015.2013.767199},
volume = {31},
year = {2013}
}
@article{Bates2013b,
abstract = {...  factors Bai and Ng (2002, 2006b) introduce a class of information criteria that consistently  estimate the true ... in order to ensure consistent  estimation of r than we did for consistency of the ... the factor loading instability is not alarming, even for c =3.5, unless consistent  estimation of r ... $\backslash$n},
author = {Bates, Brandon J. and Plagborg-M{\o}ller, Mikkel and Stock, James H. and Watson, Mark W.},
file = {:E$\backslash$:/Dropbox/phd/statespace/dfm/dfmdata/BPSW\_JOE\_Revised\_Dec03.pdf:pdf},
journal = {Journal of Econometrics},
number = {1977},
title = {{Consistent factor estimation in dynamic factor models with structural instability}},
url = {http://www.sciencedirect.com/science/article/pii/S0304407613000912$\backslash$npapers2://publication/uuid/AF85D59A-C924-4C75-B1B7-5E710ED77346},
year = {2013}
}
@article{Chauvet2014,
author = {Chauvet, Marcelle and G{\"o}tz, Thomas and Hecq, Alain},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/A Mixed-Frequency VAR Approach.pdf:pdf},
pages = {1--34},
title = {{Realized Volatility and Business Cycle Fluctuations: A Mixed-Frequency VAR Approach}},
year = {2014}
}
@article{Cheng2013,
author = {Cheng, Xu and Schorfheide, Frank},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/factors/shrinkage/Shrinkage Estimation of High-Dimensional Factor Models with Structural Instabilities.pdf:pdf},
keywords = {consistent model selection,factor model,great recession,high-dimensional,large data sets,lasso,model,shrinkage estimation,structural break},
title = {{Shrinkage Estimation of High-Dimensional Factor Models with Structural Instabilities}},
year = {2013}
}
@article{Foroni2015,
author = {Foroni, Claudia and Marcellino, Massimiliano},
file = {:E$\backslash$:/Dropbox/phd/MIDAS/UMIDAS/Foroni\_et\_al-2015-Journal\_of\_the\_Royal\_Statistical\_Society\_\_Series\_A\_(Statistics\_in\_Society).pdf:pdf},
keywords = {distributed lag polynomals,mixed data sampling,nowcasting,time aggregation},
pages = {57--82},
title = {{Unrestricted mixed data sampling (MIDAS): MIDAS regressions with unrestricted lag polynomials}},
year = {2015}
}
@techreport{Goldfarb2015,
abstract = {As the cost of storing, sharing, and analyzing data has decreased, economic activity has become increasingly digital. But while the effects of digital technology and improved digital communication have been explored in a variety of contexts, the impact on economic activity—from consumer and entrepreneurial behavior to the ways in which governments determine policy—is less well understood. Economics of Digitization explores the economic impact of digitization, with each chapter identifying a promising new area of research. The Internet is one of the key drivers of growth in digital communication, and the first set of chapters discusses basic supply-and-demand factors related to access. Later chapters discuss new opportunities and challenges created by digital technology and describe some of the most pressing policy issues. As digital technologies continue to gain in momentum and importance, it has become clear that digitization has features that do not fit well into traditional economic models. This suggests a need for a better understanding of the impact of digital technology on economic activity, and Economic Analysis of the Digital Economy brings together leading scholars to explore this emerging area of research.},
author = {Goldfarb, Avi and Greenstein, Shane M. and Tucker, Catherine E.},
isbn = {9780226206844},
publisher = {University of Chicago Press},
title = {{Economic Analysis of the Digital Economy}},
url = {http://econpapers.repec.org/RePEc:ucp:bknber:9780226206844},
year = {2015}
}
@incollection{Harvey2006a,
abstract = {Structural time series models are formulated in terms of components, such as trends, seasonals and cycles, that have a direct interpretation. As well as providing a framework for time series decomposition by signal extraction, they can be used for forecasting and for 'nowcasting'. The structural interpretation allows extensions to classes of models that are able to deal with various issues in multivariate series and to cope with non-Gaussian observations and nonlinear models. The statistical treatment is by the state space form and hence data irregularities such as missing observations are easily handled. Continuous time models offer further flexibility in that they can handle irregular spacing. The paper compares the forecasting performance of structural time series models with ARIMA and autoregressive models. Results are presented showing how observations in linear state space models are implicitly weighted in making forecasts and hence how autoregressive and vector error correction representations can be obtained. The use of an auxiliary series in forecasting and nowcasting is discussed. A final section compares stochastic volatility models with GARCH. © 2006 Elsevier B.V. All rights reserved.},
author = {Harvey, Andrew},
booktitle = {Handbook of Economic Forecasting},
chapter = {7},
doi = {10.1016/S1574-0706(05)01007-4},
editor = {Elliott, Graham and Granger, Clive W. J. and Timmermann, Allan},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Harvey - 2006 - Chapter 7 Forecasting with Unobserved Components Time Series Models.pdf:pdf},
isbn = {9780444513953},
issn = {15740706},
keywords = {Kalman filter,continuous time,cycles,non-Gaussian models,state space,stochastic trend,stochastic volatility},
pages = {327--412},
publisher = {Elsevier},
title = {{Forecasting with Unobserved Components Time Series Models}},
volume = {1},
year = {2006}
}
@misc{Marcellino2007d,
author = {Marcellino, Massimiliano and Schumacher, Christian},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/01 slide Factor nowcasting of German GDP.pdf:pdf},
internal-note = {Presentation slides (per file field); companion to the Bundesbank discussion paper Marcellino2007c -- verify venue},
keywords = {large factor models,midas,missing values,mixed-frequency,nowcasting},
month = oct,
pages = {1--45},
title = {{Factor nowcasting of German GDP with ragged-edge data: A model comparison using MIDAS projections}},
year = {2007}
}
@techreport{Marcellino2007c,
author = {Marcellino, Massimiliano and Schumacher, Christian},
file = {:E$\backslash$:/Dropbox/phd/bsts/dynamicfator/factor/Factor-MIDAS for now- and forecasting.pdf:pdf},
institution = {Deutsche Bundesbank},
keywords = {MIDAS, large factor models, nowcasting, mixed-freq},
number = {34},
title = {{Factor-MIDAS for now- and forecasting with ragged-edge data: A model comparison for German GDP}},
type = {Discussion Paper Series 1: Economic Studies},
year = {2007}
}
@comment{NOTE(review): Matter2009 is a broken Mendeley auto-import. "Wealth, Matter" as
author and the title "Document De Travail" (French for "working paper") are PDF-extraction
artifacts, not real bibliographic data. Presumably a December 2009 French-language working
paper (e.g. a Banque de France "Document de travail") -- locate the source PDF, identify
the real author/title/series, and replace this entry.}
@article{Matter2009,
author = {Matter, Wealth},
number = {December},
title = {{Document De Travail}},
year = {2009}
}
@manual{Structural2015,
author = {L{\'o}pez-de-Lacalle, Javier},
file = {:E$\backslash$:/Dropbox/phd/statespace/stsm.pdf:pdf},
note = {R package reference manual},
title = {{stsm: Structural Time Series Models}},
year = {2015}
}
@misc{TheMendeleySupportTeam2011,
abstract = {A quick introduction to Mendeley. Learn how Mendeley creates your personal digital library, how to organize and annotate documents, how to collaborate and share with colleagues, and how to generate citations and bibliographies.},
address = {London},
author = {{The Mendeley Support Team}},
file = {:C$\backslash$:/Users/snowdj/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/The Mendeley Support Team - 2011 - Getting Started with Mendeley.pdf:pdf},
howpublished = {Mendeley Desktop},
keywords = {Mendeley,how-to,user manual},
pages = {1--16},
publisher = {Mendeley Ltd.},
title = {{Getting Started with Mendeley}},
url = {http://www.mendeley.com},
year = {2011}
}
@book{Elliott2013handbook,
editor = {Elliott, Graham and Timmermann, Allan},
isbn = {9780444627322},
publisher = {North-Holland},
title = {{Handbook of Economic Forecasting, Volumes 2A--2B}},
url = {http://www.amazon.com/Handbook-Economic-Forecasting-2A-2B-Volume/dp/0444627324},
urldate = {2015-05-06},
year = {2013}
}
@article{Madigan1995a,
author = {Madigan, David and Raftery, Adrian E. and Volinsky, Chris T. and Hoeting, Jennifer A.},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/bma1.pdf:pdf},
internal-note = {Venue missing -- verify against the PDF; possibly an early version of the Hoeting et al. (1999, Statistical Science) BMA tutorial},
title = {{Bayesian Model Averaging}},
year = {1995}
}
@article{Madigan1994a,
abstract = {We consider the problem of model selection and accounting for model uncertainty in high-dimensional contingency tables, motivated by expert system applications. The approach most used currently is a stepwise strategy guided by tests based on approximate asymptotic P values leading to the selection of a single model; inference is then conditional on the selected model. The sampling properties of such a strategy are complex, and the failure to take account of model uncertainty leads to underestimation of uncertainty about quantities of interest. In principle, a panacea is provided by the standard Bayesian formalism that averages the posterior distributions of the quantity of interest under each of the models, weighted by their posterior model probabilities. Furthermore, this approach is optimal in the sense of maximizing predictive ability. But this has not been used in practice, because computing the posterior model probabilities is hard and the number of models is very large (often greater than $10^{11}$). We argue that the standard Bayesian formalism is unsatisfactory and propose an alternative Bayesian approach that, we contend, takes full account of the true model uncertainty by averaging over a much smaller set of models. An efficient search algorithm is developed for finding these models. We consider two classes of graphical models that arise in expert systems: the recursive causal models and the decomposable log-linear models. For each of these, we develop efficient ways of computing exact Bayes factors and hence posterior model probabilities. For the decomposable log-linear models, this is based on properties of chordal graphs and hyper-Markov prior distributions and the resultant calculations can be carried out locally. The end product is an overall strategy for model selection and accounting for model uncertainty that searches efficiently through the very large classes of models involved. Three examples are given. The first two concern data sets that have been analyzed by several authors in the context of model selection. The third addresses a urological diagnostic problem. In each example, our model averaging approach provides better out-of-sample predictive performance than any single model that might reasonably have been selected.},
author = {Madigan, David and Raftery, Adrian E.},
doi = {10.1080/01621459.1994.10476894},
file = {:E$\backslash$:/Dropbox/phd/bayesian/Bma/madigan1994.pdf:pdf},
issn = {0162-1459},
journal = {Journal of the American Statistical Association},
number = {428},
pages = {1535--1546},
title = {{Model Selection and Accounting for Model Uncertainty in Graphical Models Using Occam's Window}},
url = {http://www.tandfonline.com/doi/abs/10.1080/01621459.1994.10476894},
volume = {89},
year = {1994}
}