import numpy as np

# This module both stores and provides ways of accessing observational data on
# the average sSFR of star forming galaxies as a function of stellar mass and
# redshift.  The compilation was constructed in the autumn of 2013 for use in
# Mitchell, Lacey, Cole & Baugh (2014).
#
# Data can be accessed either by
#  a) requesting a stellar mass bin (central value and a threshold) to obtain
#     datapoints for the average sSFR in that mass bin as a function of
#     redshift - Get_SSFR_Evo()
#  b) requesting a redshift bin (central value and a threshold) to obtain
#     datapoints for the average sSFR in that redshift bin as a function of
#     stellar mass - Get_SSFR_Mass()
# An example extraction is presented at the bottom of the script.
#
# Within each dataset, there should be the following information:
#  label   = the source of the data
#  imf     = the assumed initial mass function (where the original author
#            specified which was used)
#  average = whether the average specific star formation rates are median,
#            mean or unspecified
#  z       = the redshift of each datapoint
#  logm    = log_10(stellar mass / solar mass) for each datapoint
#  logssfr = log_10(average specific star formation rate / Gyr) for each datapoint
#  lo      = lower statistical errorbar (dex) (-1 sigma)
#  hi      = upper statistical errorbar (dex) (+1 sigma)
# These errorbars represent only the statistical uncertainty on the average
# specific star formation rate when specified by the author.  In some cases, we
# can estimate a lower limit on this uncertainty if information on the number
# of galaxies used to compute the average in each mass/redshift bin is
# available. (Poisson)
# When no errorbar could be extracted from the literature, hi and lo are both
# set to 0.


class Bauer13:
    """Taken from graph clicking of Bauer et al. (2013)"""
    label = "Bauer et al. (2013)"
    imf = "chabrier"
    average = "median"
    z = [0.275, 0.275, 0.275, 0.245, 0.245, 0.245, 0.215, 0.215, 0.215, 0.215,
         0.185, 0.185, 0.185, 0.185, 0.125, 0.125, 0.125, 0.125, 0.08, 0.08,
         0.08, 0.08]
    logm = [10.0, 10.5, 11.0, 10.0, 10.5, 11.0, 10.0, 10.5, 9.5, 11.0,
            9.5, 10.0, 10.5, 11.0, 9.5, 10.0, 10.5, 11.0, 9.0, 9.5,
            10.0, 10.5]
    # Tabulated as log(sSFR/yr); +9 converts to log(sSFR/Gyr).
    logssfr = np.array([-9.41, -9.82, -10.44, -9.45, -9.87, -10.49, -9.57,
                        -9.9, -9.13, -10.5, -9.4, -9.72, -10.08, -10.41,
                        -9.63, -9.96, -10.25, -10.62, -9.53, -9.88, -10.09,
                        -10.43]) + 9.0
    lo = [0.35, 0.35, 0.56, 0.29, 0.38, 0.51, 0.31, 0.41, 0.28, 0.6,
          0.32, 0.39, 0.49, 0.59, 0.34, 0.35, 0.49, 0.58, 0.26, 0.3,
          0.37, 0.62]
    hi = [0.35, 0.35, 0.55, 0.29, 0.38, 0.51, 0.3, 0.41, 0.28, 0.6,
          0.32, 0.38, 0.49, 0.59, 0.36, 0.35, 0.5, 0.57, 0.25, 0.31,
          0.4, 0.61]


class Bell07:
    """Taken from graph clicking of Bell et al. (2007)"""
    label = "Bell et al. (2007)"
    imf = "chabrier"
    average = "median"
    z = [0.9, 0.9, 0.9, 0.9, 0.9, 0.7, 0.7, 0.7, 0.7, 0.7,
         0.5, 0.5, 0.5, 0.5, 0.5, 0.3, 0.3, 0.3, 0.3]
    logm = [9.4, 9.74, 10.22, 10.7, 11.23, 9.33, 9.75, 10.25, 10.73, 11.19,
            9.27, 9.75, 10.23, 10.71, 11.24, 9.24, 9.74, 10.22, 10.7]
    logssfr = [0.08, 0.08, -0.12, -0.41, -0.8, -0.07, -0.16, -0.3, -0.54,
               -0.8, -0.26, -0.37, -0.49, -0.69, -0.83, -0.44, -0.52, -0.58,
               -0.75]
    # No errorbars could be extracted from the literature.
    lo = np.zeros(len(logm))
    hi = np.zeros(len(logm))


class Gilbank11:
    """Taken from graph clicking of Gilbank et al. (2011)"""
    label = "Gilbank et al. (2011)"
    imf = "BG03"
    average = "mean"
    z = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    logm = [8.6, 8.8, 9.0, 9.2, 9.4, 9.6, 9.8, 10.0, 10.2, 10.4, 10.6, 10.8,
            11.0, 8.7, 9.0, 9.35, 10.15, 10.62, 11.05]
    # NOTE(review): the 6th value (-0.99) breaks the smooth sequence of its
    # neighbours (~ -9.9) and looks like a transcription typo for -9.89.
    # Preserved as originally compiled - confirm against Gilbank et al. (2011).
    logssfr = np.array([-9.81, -9.85, -9.86, -9.86, -9.87, -0.99, -9.91,
                        -9.96, -10.06, -10.12, -10.22, -10.33, -10.42, -9.14,
                        -9.24, -9.46, -9.74, -9.83, -9.98]) + 9.0
    lo = np.zeros(len(logm))
    hi = np.zeros(len(logm))


class Gonzalez12:
    """Taken from graph clicking of Gonzalez et al. (2012), rising SFH with
    nebular line corrections"""
    label = "Gonzalez et al. (2012)"
    imf = "salpeter"
    average = "mean"
    z = [4.0, 5.0, 6.0, 4.0, 5.0, 6.0]
    # -0.24 dex shifts Salpeter masses onto a Chabrier-like scale.
    logm = np.array([9.7, 9.7, 9.7, 9.0, 9.0, 9.0]) - 0.24
    logssfr = [0.51, 0.50, 0.65, 0.61, 0.73, 0.73]
    lo = [0.04, 0.04, 0.04, 0.04, 0.07, 0.06]
    hi = [0.04, 0.04, 0.04, 0.04, 0.07, 0.06]


class Greene12:
    """Taken from graph clicking of Greene et al. (2012)"""
    label = "Greene et al. (2012)"
    imf = "BG03"
    average = "mean"
    z = [0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,
         0.75]
    logm = [8.0, 8.3, 8.5, 8.8, 9.1, 9.3, 9.6, 9.8, 10.0, 10.3, 10.5, 10.8]
    logssfr = np.array([-9.07, -9.02, -8.95, -9.23, -9.37, -9.62, -9.73,
                        -9.67, -10.02, -9.69, -9.81, -9.91]) + 9.0
    lo = [0.77, 0.24, 0.07, 0.05, 0.06, 0.09, 0.11, 0.13, 0.2, 0.28, 0.44,
          0.77]
    hi = [0.52, 0.23, 0.09, 0.06, 0.07, 0.07, 0.12, 0.13, 0.2, 0.23, 0.37,
          0.51]


class Huang12:
    """Taken from graph clicking of Huang et al. (2012)"""
    label = "Huang et al. (2012)"
    imf = "chabrier"
    average = "median"
    logm = [7.8, 8.3, 8.8, 9.2, 9.7, 10.2, 10.7, 11.1]
    logssfr = np.array([-9.13, -9.28, -9.45, -9.56, -9.72, -9.99, -10.23,
                        -10.62]) + 9.0
    # Local-universe sample: all datapoints placed at z = 0.
    z = np.zeros_like(logssfr)
    lo = [0.01, 0.006, 0.004, 0.004, 0.004, 0.005, 0.007, 0.02]
    hi = [0.01, 0.006, 0.004, 0.004, 0.004, 0.005, 0.007, 0.02]


class Karim11_SF:
    """Taken from Table 4 in Karim et al. (2011).  This is for star forming
    galaxies defined using their default blue colour cut."""
    label = "Karim et al. (2011), SF Systems"
    imf = "chabrier"
    average = "median"
    # The original script first read these columns with np.loadtxt from a
    # hard-coded absolute cluster path and then immediately overwrote them
    # with the hard-coded arrays below; that dead read crashed the import on
    # any machine without the file, so it has been removed.
    logm = np.array([9.580, 9.579, 9.576, 9.581, 9.579, 9.581, 9.596, 9.615,
                     9.994, 9.987, 9.976, 9.989, 9.990, 9.973, 9.975, 9.983,
                     9.986, 10.375, 10.367, 10.385, 10.383, 10.396, 10.378,
                     10.367, 10.371, 10.377, 10.745, 10.750, 10.751, 10.750,
                     10.750, 10.749, 10.767, 10.769, 10.765, 11.099, 11.100,
                     11.103, 11.100, 11.128, 11.106, 11.114, 11.152, 11.172])
    z = np.array([0.276, 0.487, 0.687, 0.892, 1.101, 1.403, 1.794, 2.172,
                  0.280, 0.490, 0.684, 0.892, 1.094, 1.392, 1.849, 2.253,
                  2.709, 0.291, 0.489, 0.685, 0.893, 1.095, 1.378, 1.807,
                  2.320, 2.776, 0.280, 0.484, 0.687, 0.890, 1.101, 1.369,
                  1.787, 2.222, 2.720, 0.286, 0.476, 0.689, 0.887, 1.103,
                  1.355, 1.797, 2.217, 2.714])
    # Linear SFR / (Msun/yr) and its linear +/- 1 sigma errors.
    sfr = np.array([0.854, 1.498, 2.111, 3.551, 5.887, 4.517, 8.540, 12.743,
                    1.471, 3.365, 5.444, 6.833, 11.857, 13.898, 28.515,
                    27.636, 29.510, 2.356, 6.197, 9.430, 16.021, 21.542,
                    29.895, 57.326, 72.455, 103.546, 3.980, 7.880, 16.440,
                    21.328, 28.653, 50.899, 83.900, 112.262, 198.359, 5.895,
                    14.096, 19.482, 34.858, 49.155, 82.323, 174.605, 203.471,
                    334.411])
    hi_lin = np.array([0.186, 0.310, 0.707, 0.816, 1.864, 1.672, 5.363,
                       5.103, 0.128, 0.542, 0.685, 1.022, 2.447, 3.101,
                       6.159, 9.848, 20.599, 0.283, 1.168, 0.817, 1.525,
                       2.337, 2.696, 6.513, 7.048, 18.146, 0.470, 1.309,
                       2.617, 2.801, 2.089, 4.714, 9.927, 24.181, 18.223,
                       1.630, 1.556, 3.069, 6.145, 10.247, 12.184, 24.566,
                       21.609, 42.354])
    lo_lin = np.array([0.184, 0.450, 0.416, 1.074, 1.247, 1.680, 7.448,
                       5.877, 0.229, 0.518, 1.295, 1.642, 2.247, 3.134,
                       5.184, 5.200, 22.695, 0.116, 0.683, 0.737, 1.622,
                       2.009, 2.483, 7.466, 11.704, 20.819, 0.616, 1.401,
                       1.501, 2.365, 3.081, 5.403, 6.138, 17.702, 24.178,
                       1.808, 2.338, 3.810, 7.340, 9.035, 9.245, 36.412,
                       17.222, 68.490])
    logssfr = np.log10(sfr) - logm + 9.  # log(ssfr/Gyr)
    # Convert linear errors to dex about the central value.
    lo = np.log10(sfr / (sfr - lo_lin))
    hi = np.log10(1. + hi_lin / sfr)


class Karim11_Active:
    """Taken from graph clicking the appendix of Karim et al. (2011), this is
    for star forming galaxies defined using the stricter blue sfg cut
    discussed in the appendix"""
    label = "Karim et al. (2011), Active Population"
    imf = "chabrier"
    average = "median"
    z = [2.75, 2.75, 2.75, 2.75,
         2.25, 2.25, 2.25, 2.25, 2.25,
         1.8, 1.8, 1.8, 1.8, 1.8,
         1.4, 1.4, 1.4, 1.4, 1.4,
         1.1, 1.1, 1.1, 1.1, 1.1,
         0.9, 0.9, 0.9, 0.9, 0.9,
         0.7, 0.7, 0.7, 0.7, 0.7,
         0.5, 0.5, 0.5, 0.5,
         0.3, 0.3, 0.3, 0.3]
    logm = [9.7, 10.1, 10.5, 10.9,
            9.4, 9.7, 10.1, 10.5, 10.9,
            9.3, 9.7, 10.1, 10.5, 10.9,
            9.3, 9.7, 10.1, 10.5, 10.8,
            9.3, 9.7, 10.1, 10.5, 10.8,
            9.3, 9.7, 10.1, 10.5, 10.8,
            9.3, 9.7, 10.1, 10.5, 10.8,
            9.3, 9.7, 10.0, 10.4,
            9.3, 9.7, 10.0, 10.4]
    logssfr = [0.38, 0.71, 0.69, 0.52,
               0.63, 0.49, 0.53, 0.51, 0.37,
               0.37, 0.39, 0.48, 0.43, 0.3,
               0.15, 0.18, 0.27, 0.24, 0.26,
               0.17, 0.23, 0.18, 0.12, 0.07,
               -0.18, 0.03, 0.06, 0.01, 0.08,
               -0.12, -0.09, -0.07, -0.11, -0.23,
               -0.25, -0.15, -0.16, -0.21,
               -0.3, -0.38, -0.45, -0.34]
    lo = [0.56, 0.1, 0.08, 0.06,
          0.2, 0.15, 0.15, 0.08, 0.06,
          0.55, 0.16, 0.06, 0.06, 0.06,
          0.24, 0.20, 0.06, 0.04, 0.06,
          0.20, 0.11, 0.06, 0.06, 0.07,
          0.13, 0.14, 0.05, 0.05, 0.07,
          0.49, 0.11, 0.05, 0.07, 0.07,
          0.09, 0.10, 0.07, 0.06,
          0.20, 0.06, 0.07, 0.15]
    hi = [0.48, 0.14, 0.09, 0.08,
          0.27, 0.16, 0.17, 0.08, 0.07,
          0.45, 0.13, 0.11, 0.08, 0.05,
          0.20, 0.11, 0.09, 0.04, 0.09,
          0.16, 0.09, 0.07, 0.07, 0.08,
          0.19, 0.14, 0.06, 0.05, 0.05,
          0.35, 0.05, 0.05, 0.06, 0.06,
          0.03, 0.10, 0.07, 0.05,
          0.07, 0.07, 0.06, 0.09]


class Lin12:
    """Taken from graph clicking Lin et al. (2012)"""
    label = "Lin et al. (2012)"
    imf = "salpeter"
    average = "median"
    z = [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
    # -0.24 dex converts Salpeter masses to the Chabrier-like scale.
    logm = np.array([9.3, 9.5, 9.8, 10.0, 10.2, 10.4, 10.7, 10.9, 9.1]) - 0.24
    logssfr = np.array([-8.6, -8.65, -8.73, -8.83, -8.87, -8.94, -9.10,
                        -8.89, -8.49]) + 9.0
    lo = [0.009, 0.02, 0.01, 0.01, 0.01, 0.02, 0.02, 0.02, 0.01]
    hi = [0.01, 0.02, 0.02, 0.02, 0.02, 0.02, 0.03, 0.03, 0.01]


class Magdis10:
    """Taken from graph clicking of Magdis et al. (2010)"""
    label = "Magdis et al. (2010)"
    imf = "chabrier"
    average = "unknown"
    z = [3.0]
    logm = [10.7]
    logssfr = [0.67]
    lo = [0.0]
    hi = [0.0]


class Noeske07:
    """Taken from graph clicking of Noeske et al. (2007)"""
    label = "Noeske et al. (2007)"
    imf = "unknown"
    average = "median"
    z = [0.975, 0.975, 0.975,
         0.775, 0.775, 0.775, 0.775, 0.775,
         0.325, 0.325, 0.325, 0.325, 0.325, 0.325, 0.325, 0.325, 0.325,
         0.575, 0.575, 0.575, 0.575, 0.575, 0.575, 0.575, 0.575, 0.575]
    logm = [11.10, 11.26, 11.41,
            10.6, 10.8, 10.9, 11.1, 11.2,
            9.7, 9.9, 10.0, 10.2, 10.3, 10.5, 10.6, 10.8, 11.1,
            10.1, 10.3, 10.4, 10.6, 10.7, 10.9, 11.0, 11.2, 11.3]
    logssfr = np.array([-9.58, -9.72, -9.88,
                        -9.42, -9.42, -9.64, -9.83, -9.64,
                        -9.52, -9.43, -9.59, -9.65, -9.67, -9.77, -9.90,
                        -9.81, -10.08,
                        -9.37, -9.46, -9.46, -9.66, -9.69, -9.66, -9.75,
                        -9.94, -10.28]) + 9.0
    # The 0.1 scaling was applied in the original compilation -
    # presumably converting a clicked graph scale; confirm against the paper.
    lo = np.array([0.35, 0.32, 0.58,
                   0.33, 0.30, 0.33, 0.43, 0.25,
                   0.40, 0.26, 0.32, 0.34, 0.35, 0.25, 0.33, 0.27, 0.46,
                   0.32, 0.34, 0.37, 0.28, 0.31, 0.35, 0.37, 0.47,
                   0.28]) * 0.1
    hi = np.copy(lo)


class Oliver10:
    """Taken from graph clicking of Oliver et al. (2010)"""
    # NOTE(review): the label year (2012) disagrees with the class name and
    # docstring (2010) - confirm which publication this compilation used.
    label = "Oliver et al. (2012)"
    imf = "unknown"
    average = "mean"
    z = [1.75, 1.75, 1.35, 1.35, 1.1, 1.1,
         0.9, 0.9, 0.9,
         0.8, 0.8, 0.8, 0.8,
         0.55, 0.55, 0.55, 0.55, 0.55,
         0.45, 0.45, 0.45, 0.45, 0.45,
         0.35, 0.35, 0.35, 0.35, 0.35, 0.35,
         0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
    logm = [11.2, 11.6, 11.2, 11.6, 11.2, 11.6,
            10.9, 11.2, 11.5,
            10.6, 11.2, 11.5, 10.9,
            10.4, 10.6, 10.9, 11.2, 11.5,
            10.4, 10.6, 10.9, 11.2, 11.5,
            10.1, 10.4, 10.6, 10.9, 11.2, 11.5,
            9.1, 9.4, 9.6, 9.9, 10.1, 10.4, 10.6, 10.9, 11.2]
    # NOTE(review): the 20th value (0.77) is the only positive value in its
    # neighbourhood and looks like a sign-flip typo for -0.77.  Preserved as
    # originally compiled - confirm against the source paper.
    logssfr = [0.07, 0.14, 0.04, -0.10, -0.24, -0.35,
               -0.28, -0.46, -0.53,
               -0.48, -0.66, -0.74, -0.57,
               -0.76, -0.63, -0.73, -0.78, -0.85,
               -1.04, 0.77, -0.83, -0.89, -0.84,
               -0.99, -0.88, -0.89, -0.98, -0.96, -0.98,
               -1.10, -1.19, -1.15, -1.15, -1.08, -1.08, -1.03, -1.09, -1.12]
    lo = np.array([0.11, 0.02, 0.08, 0.06, 0.07, 0.08,
                   0.08, 0.09, 0.11,
                   0.13, 0.05, 0.05, 0.11,
                   0.18, 0.08, 0.06, 0.03, 0.04,
                   0.17, 0.11, 0.04, 0.07, 0.08,
                   0.16, 0.04, 0.02, 0.04, 0.06, 0.09,
                   0.18, 0.07, 0.09, 0.07, 0.07, 0.02, 0.02, 0.02, 0.02])
    hi = np.copy(lo)


class Panella09:
    """Taken from graph clicking of Panella et al. (2009)"""
    label = "Panella et al. (2009)"
    imf = "salpeter"
    average = "unknown"
    z = [1.6, 1.6, 1.6, 1.6, 1.6, 2.1, 2.1, 2.1, 2.1, 2.1, 1.4, 1.9]
    logm = np.array([10.2, 10.3, 10.5, 10.7, 11.1,
                     10.2, 10.3, 10.5, 10.6, 10.9,
                     10.5, 10.5]) - 0.24
    logssfr = [0.07, -0.11, 0.07, 0.03, -0.03,
               0.39, 0.59, 0.42, 0.54, 0.44,
               0.07, 0.52]
    lo = [0.18, 0.20, 0.15, 0.16, 0.16, 0.18, 0.18, 0.16, 0.16, 0.15, 0.11,
          0.13]
    hi = [0.14, 0.14, 0.14, 0.11, 0.11, 0.14, 0.11, 0.14, 0.11, 0.11, 0.18,
          0.16]


class Peng10:
    """Taken from graph clicking all zCOSMOS + SDSS in Peng et al. (2010)"""
    label = "Peng et al. (2010)"
    imf = "chabrier"
    average = "mean"
    # Obtained assuming Peng et al. cosmology h = 0.7, omm = 0.25, oml = 0.75
    z = [0.84090512, 0.59882974, 0.4223634, 0.2158189, 0.06898405]
    logm = [10.0, 10.0, 10.0, 10.0, 10.0]
    logssfr = [-0.28, -0.37, -0.52, -0.61, -0.97]
    lo = [0.0, 0.0, 0.0, 0.0, 0.0]
    hi = [0.0, 0.0, 0.0, 0.0, 0.0]


class Reddy12:
    """Taken from graph clicking of Reddy et al. (2012)"""
    label = "Reddy et al. (2012)"
    imf = "salpeter"
    average = "mean"
    z = [2.3, 3.0]
    logm = np.array([9.7, 9.7]) - 0.24
    logssfr = [0.38, 0.35]
    lo = [0.0, 0.0]
    hi = [0.0, 0.0]


class Rodighiero10:
    """Taken from graph clicking of Rodighiero et al. (2010)"""
    label = "Rodighiero et al. (2010)"
    imf = "salpeter"
    average = "mean"
    z = [0.25, 0.75, 1.25,
         0.25, 0.75, 1.25, 1.75,
         0.25, 0.75, 1.25, 1.75, 2.25,
         0.25, 0.75, 1.25, 1.75, 2.25]
    logm = np.array([9.75, 9.75, 9.75,
                     10.25, 10.25, 10.25, 10.25,
                     10.75, 10.75, 10.75, 10.75, 10.75,
                     11.25, 11.25, 11.25, 11.25, 11.25]) - 0.24
    # NOTE(review): the 8th value (-.089) is inconsistent with the trend of
    # its mass bin and may be a decimal-point typo for -0.89.  Preserved as
    # originally compiled - confirm against Rodighiero et al. (2010).
    logssfr = [-0.73, -0.17, 0.23,
               -0.76, -0.11, 0.20, 0.26,
               -.089, 0.29, 0.0, -0.06, -0.23,
               -1.38, -0.57, -0.31, -0.23, -0.13]
    lo = np.array([0.03, 0.08, 0.08,
                   0.03, 0.03, 0.05, 0.08,
                   0.01, 0.01, 0.02, 0.07, 0.05,
                   0.03, 0.03, 0.03, 0.07, 0.05])
    hi = np.copy(lo)


class SDSS_DR7:
    """Taken from Claudia's file from public SDSS DR7 data"""
    label = "SDSS DR7"
    imf = "chabrier"
    average = "median"
    logm = [8.125, 8.375, 8.625, 8.875, 9.125, 9.375, 9.625, 9.875, 10.125,
            10.375, 10.625, 10.875, 11.125, 11.375, 11.625, 11.875]
    logssfr = [-0.005646, -0.25631, -0.349169, -0.404207, -0.509524,
               -0.606135, -0.7041834, -0.814317, -0.891007, -0.972899,
               -1.064655, -1.165776, -1.280782, -1.444261, -1.615333,
               -1.42538]
    lo = [0.04987972, 0.02751972, 0.02173844, 0.01931067, 0.01375512,
          0.00934488, 0.00788301, 0.00608577, 0.00528931, 0.00455923,
          0.0042894, 0.00460105, 0.00626384, 0.01139599, 0.03323199,
          0.13610493]
    hi = [0.09716475, 0.07773723, 0.04134841, 0.03250055, 0.01906088,
          0.01405107, 0.00931773, 0.00720658, 0.00588184, 0.00529926,
          0.00530348, 0.00607182, 0.00870057, 0.02102521, 0.07473493,
          0.32137491]
    # Single survey redshift applied to every mass bin.
    z = np.zeros(len(hi)) + 0.077146


class Stark13:
    """Taken from graph clicking of Stark et al. (2013), using the evolving
    H_alpha EW distribution"""
    label = "Stark et al. (2013)"
    imf = "salpeter"
    average = "mean"
    z = [4.0, 5.0, 6.0, 7.0]
    logm = np.array([9.7, 9.7, 9.7, 9.7]) - 0.24
    logssfr = [0.78, 0.75, 0.86, 1.15]
    lo = [0.0, 0.0, 0.0, 0.0]
    hi = [0.0, 0.0, 0.0, 0.0]


class Wang12:
    """Taken from graph clicking of Wang et al. (2012)"""
    label = "Wang et al. (2012)"
    imf = "chabrier"
    average = "unknown"
    z = [0.35, 0.35, 0.35, 0.35, 0.35, 0.35, 0.35,
         0.65, 0.65, 0.65, 0.65, 0.65, 0.65,
         0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9,
         1.15, 1.15, 1.15, 1.15, 1.15, 1.15,
         1.45, 1.45, 1.45, 1.45, 1.45]
    logm = np.array([9.7, 10.0, 10.2, 10.6, 10.9, 11.1, 11.4,
                     9.95, 10.25, 10.6, 10.8, 11.1, 11.4,
                     9.7, 10.0, 10.2, 10.6, 10.8, 11.1, 11.4,
                     10.0, 10.2, 10.6, 10.8, 11.1, 11.4,
                     10.2, 10.5, 10.8, 11.1, 11.4])
    # Clicked as log(SFR); converted to log(sSFR/Gyr) below.
    logsfr = np.array([0.37, 0.65, 0.82, 0.95, 0.77, 0.58, 0.74,
                       0.99, 1.22, 1.38, 1.38, 1.34, 1.26,
                       0.76, 1.13, 1.45, 1.61, 1.64, 1.57, 1.54,
                       1.27, 1.53, 1.79, 1.93, 1.92, 1.83,
                       1.78, 2.10, 2.22, 2.25, 2.53])
    lo = np.zeros_like(logm)
    hi = np.zeros_like(logm)
    logssfr = logsfr - logm + 9.0


class Whitaker12:
    """Taken from graph clicking of Whitaker et al. (2012)"""
    label = "Whitaker et al. (2012)"
    imf = "chabrier"
    average = "median"
    z = [0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
         0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,
         1.75, 1.75, 1.75, 1.75, 1.75,
         2.25, 2.25, 2.25, 2.25]
    logm = np.array([9.2, 9.3, 9.4, 9.6, 9.8, 10.0, 10.4,
                     9.45, 9.55, 9.65, 9.75, 9.85, 10.0, 10.25, 10.5,
                     10.25, 10.35, 10.45, 10.6, 10.8,
                     10.45, 10.6, 10.75, 11.0])
    logsfr = [-0.19, -0.07, 0.02, 0.17, 0.26, 0.52, 0.66,
              0.54, 0.62, 0.73, 0.81, 0.95, 1.02, 1.16, 1.29,
              1.76, 1.83, 1.84, 1.96, 2.05,
              2.09, 2.13, 2.20, 2.34]
    # Clicked errorbars were dispersion, not error on the median, so they are
    # deliberately not used here:
    # lo = np.array([0.28,0.34,0.37,0.35,0.38,0.38,0.37, 0.31,0.32,0.34,0.37,
    #                0.36,0.38,0.37,0.40, 0.38,0.38,0.42,0.40,0.41, 0.26,
    #                0.34,0.34,0.36])
    logssfr = logsfr - logm + 9.0
    lo = np.zeros_like(logm)
    hi = np.zeros_like(logm)


# Registry mapping dataset names onto dataset classes.  Note: "list" shadows
# the builtin but is kept for backward compatibility with existing callers.
list_str = ["Bauer13", "Bell07", "Gilbank11", "Gonzalez12", "Greene12",
            "Huang12", "Karim11_SF", "Karim11_Active", "Lin12", "Magdis10",
            "Noeske07", "Oliver10", "Panella09", "Peng10", "Reddy12",
            "Rodighiero10", "SDSS_DR7", "Stark13", "Wang12", "Whitaker12"]
list = [Bauer13, Bell07, Gilbank11, Gonzalez12, Greene12, Huang12,
        Karim11_SF, Karim11_Active, Lin12, Magdis10, Noeske07, Oliver10,
        Panella09, Peng10, Reddy12, Rodighiero10, SDSS_DR7, Stark13, Wang12,
        Whitaker12]


def Get_Data(name):
    """Return the datapoints of the named dataset as numpy arrays.

    inputs:  name = dataset name, must be one of the entries in list_str
    outputs: z, logm, logssfr, lo, hi (see module header for definitions)
    raises:  ValueError if name is not a known dataset"""
    dataset = list[list_str.index(name)]
    z = np.array(dataset.z)
    logm = np.array(dataset.logm)
    lo = np.array(dataset.lo)
    hi = np.array(dataset.hi)
    logssfr = np.array(dataset.logssfr)
    return z, logm, logssfr, lo, hi


def Get_SSFR_Evo(logm_bin, logm_tolerance, list_str):
    """Get observational datapoints on the average specific star formation
    rate of star forming galaxies within a given stellar mass range.

    inputs:
      logm_bin       = central log(stellar mass) value
      logm_tolerance = half the width of the log(stellar mass) bin
      list_str       = list of the names of the desired datasets
    outputs:
      z_out          = redshift
      logssfr_out    = log(sSFR / Gyr)
      lo             = low errorbar on sSFR / dex
      hi             = high errorbar on sSFR / dex"""
    z_out = []
    logssfr_out = []
    lo_out = []
    hi_out = []
    for name in list_str:
        z, logm, logssfr, lo, hi = Get_Data(name)
        # For each distinct redshift in the dataset, keep the single
        # datapoint whose mass is closest to the bin centre.
        for z_val in np.unique(z):
            z_bin = z == z_val
            ok = (logm[z_bin] <= logm_bin + logm_tolerance) & \
                 (logm[z_bin] >= logm_bin - logm_tolerance)
            if len(logm[z_bin][ok]) > 0:
                ind_best = np.argmin(abs(logm[z_bin][ok] - logm_bin))
                z_out.append(z[z_bin][ok][ind_best])
                logssfr_out.append(logssfr[z_bin][ok][ind_best])
                lo_out.append(lo[z_bin][ok][ind_best])
                hi_out.append(hi[z_bin][ok][ind_best])
                # Sanity check: log(sSFR/Gyr) > 12 almost certainly means a
                # dataset was entered in the wrong units.
                if logssfr[z_bin][ok][ind_best] > 12:
                    print("data has ssfr with probably wrong units, data = "
                          + name)
                    raise SystemExit
    return z_out, logssfr_out, lo_out, hi_out


def Get_SSFR_Mass(z_in, z_tolerance, list_str):
    """Get observational datapoints on the average specific star formation
    rate of star forming galaxies within a given redshift range.

    inputs:
      z_in        = central redshift value
      z_tolerance = half the width of the redshift bin
      list_str    = list of the names of the desired datasets
    outputs:
      logm_out    = log(stellar mass / solar mass)
      logssfr_out = log(sSFR / Gyr)
      lo          = low errorbar on sSFR / dex
      hi          = high errorbar on sSFR / dex"""
    logssfr_out = []
    lo_out = []
    hi_out = []
    logm_out = []
    for name in list_str:
        z, logm, logssfr, lo, hi = Get_Data(name)
        ok = (z <= z_in + z_tolerance) & (z >= z_in - z_tolerance)
        logm_out.extend(logm[ok])
        logssfr_out.extend(logssfr[ok])
        lo_out.extend(lo[ok])
        hi_out.extend(hi[ok])
    return (np.array(logm_out), np.array(logssfr_out), np.array(lo_out),
            np.array(hi_out))


# Example usage of the script
if __name__ == '__main__':
    from pylab import *
    from scipy import integrate

    # example cosmological parameters (used to calculate lookback time)
    h = 0.7
    omm = 0.25
    oml = 0.75
    H = lambda z: 100 * h * (omm * (1.0 + z)**3.0 + oml)**0.5
    age_int = lambda z: 1.0 / ((1 + z) * H(z))

    # calculate the age of the universe at z=0 (979.16 converts
    # Mpc km^-1 s to Gyr)
    age_universe = integrate.quad(age_int, float(0.), np.inf)[0] * 979.16

    # Desired list of sources of observational data
    name_list = ["Bauer13", "Bell07", "Gilbank11", "Gonzalez12", "Greene12",
                 "Huang12", "Karim11_SF", "Karim11_Active", "Lin12",
                 "Magdis10", "Noeske07", "Oliver10", "Panella09", "Peng10",
                 "Reddy12", "Rodighiero10", "Stark13", "Wang12", "Whitaker12"]

    # Example of extracting data as a function of redshift for a given
    # stellar mass bin
    # central log(stellar mass) value
    logm = 9.5
    # half the width of the stellar mass bin / dex
    logm_tolerance = 0.24

    # Get the data
    z, logssfr, lo, hi = Get_SSFR_Evo(logm, logm_tolerance, name_list)

    # Calculate the lookback time of each datapoint
    tlb = np.zeros_like(z)
    for n in range(len(tlb)):
        tlb[n] = age_universe - integrate.quad(age_int, z[n], np.inf)[0] * 979.16

    errorbar(tlb, logssfr, yerr=[lo, hi], fmt="kx", alpha=0.5, ms=3,
             ecolor='0.55')
    show()