Jacob Hummel: Updated bibliography. (about 8 years ago)

Commit id: dd05df9aa1f594a507440a3302a052f450655242

deletions | additions      

       

BibTeX export options can be customized via Options -> BibTeX in Mendeley Desktop

@inproceedings{Springel2014,
  abstract = {Numerical methods play an ever more important role in astrophysics. This is especially true in theoretical works, but of course, even in purely observational projects, data analysis without massive use of computational methods has become unthinkable. The key utility of computer simulations comes from their ability to solve complex systems of equations that are either intractable with analytic techniques or only amenable to highly approximative treatments. Simulations are best viewed as a powerful complement to analytic reasoning, and as the method of choice to model systems that feature enormous physical complexity such as star formation in evolving galaxies, the topic of this 43rd Saas Fee Advanced Course. The organizers asked me to lecture about high performance computing and numerical modelling in this winter school, and to specifically cover the basics of numerically treating gravity and hydrodynamics in the context of galaxy evolution. This is still a vast field, and I necessarily had to select a subset of the relevant material. The written notes presented here quite closely follow the lectures as held in Villars-sur-Ollon, which were meant to provide a general overview about some of the most pertinent techniques that may be relevant for students working on numerical models of galaxy evolution and star formation. The discussion is hence often at an introductory level, giving precedence to a presentation of the main numerical concepts rather than to a mathematically detailed exposition of the techniques.},
  address = {Villars-sur-Ollon, Switzerland},
  archivePrefix = {arXiv},
  arxivId = {1412.5187},
  author = {Springel, Volker},
  booktitle = {Lecture notes given at the 43rd Saas Fee Advanced School, March 11-16, 2013},
  eprint = {1412.5187},
  file = {:home/jhummel/documents/papers/literature/2014/Springel{\_}2014.pdf:pdf},
  month = {dec},
  pages = {108},
  title = {{High performance computing and numerical modelling}},
  url = {http://arxiv.org/abs/1412.5187 http://adsabs.harvard.edu/abs/2014arXiv1412.5187S},
  year = {2014}
}

@misc{Pontzenetal2013,
  abstract = {Pynbody is a lightweight, portable, format-transparent analysis package for astrophysical N-body and smooth particle hydrodynamic simulations supporting PKDGRAV/Gasoline, Gadget, N-Chilada, and RAMSES AMR outputs. Written in python, the core tools are accompanied by a library of publication-level analysis routines.},
  author = {Pontzen, Andrew and Ro{\v{s}}kar, Rok and Stinson, Greg and Woods, Rory},
  journal = {Astrophysics Source Code Library, ascl:1305.002},
  title = {{pynbody: N-Body/SPH analysis for python}},
  url = {http://adsabs.harvard.edu.ezproxy.lib.utexas.edu/abs/2013ascl.soft05002P},
  year = {2013}
}

@article{Oesch2010b,
  abstract = {We present a first morphological study of z {\~{}} 7-8 Lyman break galaxies (LBGs) from Oesch et al. and Bouwens et al. detected in ultra-deep near-infrared imaging of the Hubble Ultra-Deep Field (HUDF) by the HUDF09 program. With an average intrinsic size of 0.7 ± 0.3 kpc, these galaxies are found to be extremely compact, having an average observed surface brightness of $\mu_{J} \simeq 26$ mag arcsec$^{-2}$, and only two out of the full sample of 16 z {\~{}} 7 galaxies show extended features with resolved double cores.
By comparison to lower redshift LBGs, it is found that only little size evolution takes place from z {\~{}} 7 to z {\~{}} 6, while galaxies between z {\~{}} 4-5 show more extended wings in their apparent profiles. The average size scales as $(1 + z)^{-m}$ with m = 1.12 ± 0.17 for galaxies with luminosities in the range (0.3-1) $L^{*}_{z=3}$ and with m = 1.32 ± 0.52 for (0.12-0.3) $L^{*}_{z=3}$, consistent with galaxies having constant comoving sizes. The peak of the size distribution changes only slowly from z {\~{}} 7 to z {\~{}} 4. However, a tail of larger galaxies ($\gtrsim$ 1.2 kpc) is gradually built up toward later cosmic times, possibly via hierarchical build-up or via enhanced accretion of cold gas. Additionally, the average star formation surface density of LBGs with luminosities (0.3-1) $L^{*}_{z=3}$ is nearly constant at $\Sigma_{\mathrm{SFR}}$ = 1.9 M$_{\odot}$ yr$^{-1}$ kpc$^{-2}$ over the entire redshift range z {\~{}} 4-7 suggesting similar star formation efficiencies at these early epochs. The above evolutionary trends seem to hold out to z {\~{}} 8 though the sample is still small and possibly incomplete.},

  volume = {450},
  year = {2015}
}

@article{Pontzenetal2013,
  abstract = {Pynbody is a lightweight, portable, format-transparent analysis package for astrophysical N-body and smooth particle hydrodynamic simulations supporting PKDGRAV/Gasoline, Gadget, N-Chilada, and RAMSES AMR outputs. Written in python, the core tools are accompanied by a library of publication-level analysis routines.},
  author = {Pontzen, Andrew and Ro{\v{s}}kar, Rok and Stinson, Greg and Woods, Rory},
  journal = {Astrophysics Source Code Library},
  title = {{pynbody: N-Body/SPH analysis for python}},
  url = {http://adsabs.harvard.edu.ezproxy.lib.utexas.edu/abs/2013ascl.soft05002P},
  year = {2013}
}

@article{SpringelYoshidaWhite2001,
  abstract = {We describe the newly written code GADGET which is suitable both for cosmological simulations of structure formation and for the simulation of interacting galaxies. GADGET evolves self-gravitating collisionless fluids with the traditional N-body approach, and a collisional gas by smoothed particle hydrodynamics. Along with the serial version of the code, we discuss a parallel version that has been designed to run on massively parallel supercomputers with distributed memory. While both versions use a tree algorithm to compute gravitational forces, the serial version of GADGET can optionally employ the special-purpose hardware GRAPE instead of the tree. Periodic boundary conditions are supported by means of an Ewald summation technique. The code uses individual and adaptive timesteps for all particles, and it combines this with a scheme for dynamic tree updates. Due to its Lagrangian nature, GADGET thus allows a very large dynamic range to be bridged, both in space and time. So far, GADGET has been successfully used to run simulations with up to 7.5$\times$10$^{7}$ particles, including cosmological studies of large-scale structure formation, high-resolution simulations of the formation of clusters of galaxies, as well as workstation-sized problems of interacting galaxies. In this study, we detail the numerical algorithms employed, and show various tests of the code. We publicly release both the serial and the massively parallel version of the code.},
  author = {Springel, Volker and Yoshida, Naoki and White, Simon D.M.},

  volume = {6},
  year = {2001}
}

@inproceedings{Springel2014,
  abstract = {Numerical methods play an ever more important role in astrophysics. This is especially true in theoretical works, but of course, even in purely observational projects, data analysis without massive use of computational methods has become unthinkable. The key utility of computer simulations comes from their ability to solve complex systems of equations that are either intractable with analytic techniques or only amenable to highly approximative treatments. Simulations are best viewed as a powerful complement to analytic reasoning, and as the method of choice to model systems that feature enormous physical complexity such as star formation in evolving galaxies, the topic of this 43rd Saas Fee Advanced Course. The organizers asked me to lecture about high performance computing and numerical modelling in this winter school, and to specifically cover the basics of numerically treating gravity and hydrodynamics in the context of galaxy evolution. This is still a vast field, and I necessarily had to select a subset of the relevant material. The written notes presented here quite closely follow the lectures as held in Villars-sur-Ollon, which were meant to provide a general overview about some of the most pertinent techniques that may be relevant for students working on numerical models of galaxy evolution and star formation. The discussion is hence often at an introductory level, giving precedence to a presentation of the main numerical concepts rather than to a mathematically detailed exposition of the techniques.},
  address = {Villars-sur-Ollon, Switzerland},
  archivePrefix = {arXiv},
  arxivId = {1412.5187},
  author = {Springel, Volker},
  booktitle = {Lecture notes given at the 43rd Saas Fee Advanced School, March 11-16, 2013},
  eprint = {1412.5187},
  file = {:home/jhummel/documents/papers/literature/2014/Springel{\_}2014.pdf:pdf},
  month = {dec},
  title = {{High performance computing and numerical modelling}},
  url = {http://arxiv.org/abs/1412.5187 http://adsabs.harvard.edu/abs/2014arXiv1412.5187S},
  year = {2014}
}

@article{Akeretetal2013,
  abstract = {We study the benefits and limits of parallelised Markov chain Monte Carlo (MCMC) sampling in cosmology. MCMC methods are widely used for the estimation of cosmological parameters from a given set of observations and are typically based on the Metropolis-Hastings algorithm. Some of the required calculations can however be computationally intensive, meaning that a single long chain can take several hours or days to calculate. In practice, this can be limiting, since the MCMC process needs to be performed many times to test the impact of possible systematics and to understand the robustness of the measurements being made. To achieve greater speed through parallelisation, MCMC algorithms need to have short autocorrelation times and minimal overheads caused by tuning and burn-in. The resulting scalability is hence influenced by two factors, the MCMC overheads and the parallelisation costs. In order to efficiently distribute the MCMC sampling over thousands of cores on modern cloud computing infrastructure, we developed a Python framework called CosmoHammer which embeds emcee, an implementation by Foreman-Mackey et al. (2012) of the affine invariant ensemble sampler by Goodman and Weare (2010). We test the performance of CosmoHammer for cosmological parameter estimation from cosmic microwave background data. While Metropolis-Hastings is dominated by overheads, CosmoHammer is able to accelerate the sampling process from a wall time of 30 h on a dual core notebook to 16 min by scaling out to 2048 cores. Such short wall times for complex datasets open possibilities for extensive model testing and control of systematics.},
  author = {Akeret, Jo{\"{e}}l and Seehars, Sebastian and Amara, Adam and Refregier, Alexandre and Csillaghy, Andr{\'{e}}},

@book{McKinney2012,
  author = {McKinney, Wes},
  isbn = {978-1-4493-1979-3},
  pages = {466},
  publisher = {O'Reilly Media},
  title = {{Python for Data Analysis}},
  year = {2012}

  doi = {10.1145/2833157.2833162},
  file = {:home/jhummel/documents/papers/literature/2015/Lam, Pitrou, Seibert{\_}2015.pdf:pdf},
  isbn = {9781450340052},
  pages = {1--6},
  publisher = {ACM Press},
  title = {{Numba}},
  url = {http://dl.acm.org/citation.cfm?doid=2833157.2833162},