Geophysical characteristics of mitigation pathways (Supplementary Material, Table 2.SM.12)¶
Notebook sr15_2.SM.4_geophysical_characteristics¶
This notebook is based on the Release 1.1 of the IAMC 1.5C Scenario Explorer and Data and refers to the published version of the IPCC Special Report on Global Warming of 1.5C (SR15).
The notebook is run with pyam release 0.5.0.
The source code of this notebook is available on GitHub (release 2.0.2).
IPCC SR15 scenario assessment¶
Geophysical characteristics of mitigation pathways¶
This notebook computes geophysical characteristics of mitigation pathways in the IPCC's "Special Report on Global Warming of 1.5°C". The notebook generates the data for Table 2.SM.12 in the Special Report.
The scenario data used in this analysis can be accessed and downloaded at https://data.ene.iiasa.ac.at/iamc-1.5c-explorer.
Load pyam
package and other dependencies¶
import pandas as pd
import numpy as np
import yaml
import pyam
Import scenario data, categorization and specifications files¶
The metadata file with scenario categorisation and quantitative indicators can be downloaded at https://data.ene.iiasa.ac.at/iamc-1.5c-explorer.
Alternatively, it can be re-created using the notebook sr15_2.0_categories_indicators
.
The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.
# Load the full scenario ensemble and attach the categorization metadata
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r2.0.xlsx')
sr1p5.load_meta('sr15_metadata_indicators.xlsx')

# Read the assessment specifications and pull out the list of categories
with open("sr15_specs.yaml", 'r') as specs_file:
    specs = yaml.load(specs_file, Loader=yaml.FullLoader)

cats = specs.pop('cats')
Downselect scenario ensemble to categories of interest for this assessment¶
# Use a simple snake_case name for the Kyoto-GHG range indicator column
sr1p5.meta.rename(columns={'Kyoto-GHG|2010 (SAR)': 'kyoto_ghg_2010'}, inplace=True)

# AIM scenarios excluded from this assessment (removed via keep=False)
filter_args_aim = {
    'model': 'AIM*',
    'scenario': ['SFCM*_1p5Degree', 'EMF33_Med2C_nofuel', 'EMF33_Med2C_none'],
    'keep': False,
}

# Keep only scenarios with 2010 Kyoto-GHG emissions in the assessed range
# and in one of the categories of interest, then drop the AIM scenarios
df = (
    sr1p5
    .filter(kyoto_ghg_2010='in range', category=cats)
    .filter(**filter_args_aim)
)
Initialize a pyam.Statistics
instance¶
# Statistics helper grouped by scenario category; each stats.add() call
# appends one row-group to the summary table, so call order matters.
stats = pyam.Statistics(df=df, groupby={'category': cats})
# Year in which net CO2 emissions reach zero (empty top-level header)
stats.add(df.meta['year of netzero CO2 emissions'],
          header='', subheader='Year of net-zero CO2 emissions')
# Column-header groups reused for all peak-warming / end-of-century indicators
header_peak = {'header': 'Geophysical characteristics at Peak Warming'}
header_2100 = {'header': 'Geophysical characteristics in 2100'}
# Median peak warming from the MAGICC6 diagnostics (from metadata)
stats.add(df.meta['median warming at peak (MAGICC6)'],
          **header_peak, subheader='Peak|Median warming')
# Metadata column holding each scenario's year of peak warming;
# reused below to look up scenario-specific peak years
peak_year = 'year of peak warming (MAGICC6)'
stats.add(df.meta[peak_year], **header_peak, subheader='Peak|Year')
def add_indicator(stats, data, subheader=None, df=df,
                  year=('Peak', 2100), headers=(header_peak, header_2100)):
    """Add a timeseries indicator to `stats` at peak warming and in 2100.

    Parameters
    ----------
    stats : pyam.Statistics
        statistics instance to which the indicator rows are added
    data : pandas.DataFrame
        timeseries data (years as columns) from which values are drawn
    subheader : str, optional
        label appended after 'Peak|' / '2100|' in the table sub-header
    df : pyam.IamDataFrame, optional
        ensemble whose `meta` holds the scenario-specific peak-warming year
    year, headers : iterables of equal length, optional
        parallel lists of selectors and header dicts; the entry 'Peak'
        selects each scenario's own year of peak warming from `df.meta`,
        any other entry is used directly as a column of `data`.
        Defaults are tuples (not lists) to avoid the mutable-default-
        argument pitfall.
    """
    for y, h in zip(year, headers):
        if y == 'Peak':
            # look up each scenario's individual year of peak warming;
            # index levels 0-1 of `data` are (model, scenario)
            values = data.apply(
                lambda x: x[df.meta.loc[x.name[0:2]][peak_year]],
                raw=False, axis=1)
        else:
            values = data.apply(lambda x: x[y], raw=False, axis=1)
        stats.add(values, **h, subheader='{}|{}'.format(y, subheader))
# Atmospheric CO2 concentration (MAGICC6 median) at peak warming and in 2100
co2_concentration = (
    df.filter(variable='AR5 climate diagnostics|Concentration|CO2|MAGICC6|MED')
    .timeseries()
)
add_indicator(stats, co2_concentration, 'CO2 [ppm]')
# Total radiative forcing (MAGICC6 median); `rf_all` is reused below
# to derive the non-CO2 component
rf_all = (
    df.filter(variable='AR5 climate diagnostics|Forcing|MAGICC6|MED')
    .timeseries()
)
add_indicator(stats, rf_all, 'RF all [Wm2]')
# CO2-only radiative forcing (MAGICC6 median); `rf_co2` is reused below
rf_co2 = (
    df.filter(variable='AR5 climate diagnostics|Forcing|CO2|MAGICC6|MED')
    .timeseries()
)
add_indicator(stats, rf_co2, 'RF CO2 [Wm2]')
# Non-CO2 forcing is not reported directly, so derive it as the
# difference between total and CO2-only forcing.
sh = 'RF non-CO2 [Wm2]'
# Drop index levels 2-4 so the two timeseries align on (model, scenario)
# only — presumably region/variable/unit; note this mutates the indexes
# of `rf_all` and `rf_co2` in place.
rf_all.index = rf_all.index.droplevel([2, 3, 4])
rf_co2.index = rf_co2.index.droplevel([2, 3, 4])
rf_non_co2 = rf_all - rf_co2
add_indicator(stats, rf_non_co2, sh)
# Cumulative CO2 emissions as reported by the modelling teams, for both
# the 2016-to-peak-warming and 2016-2100 periods
for header, period in [(header_peak, '2016 to peak warming'),
                       (header_2100, '2016-2100')]:
    meta_col = 'cumulative CO2 emissions ({}, Gt CO2)'.format(period)
    label = 'cumulative CO2 emissions ({}, as submitted) [GtCO2]'.format(period)
    stats.add(df.meta[meta_col], **header, subheader=label)
# CO2 emissions harmonized for MAGICC6 input: energy & industrial
# processes plus AFOLU (land use)
harmonized_co2_vars = [
    'Diagnostics|MAGICC6|Harmonized Input|Emissions|CO2|Energy and Industrial Processes',
    'Diagnostics|MAGICC6|Harmonized Input|Emissions|CO2|AFOLU'
]
# Decadal timesteps 2010-2100; sum the two sectoral variables to one
# total-CO2 row per (model, scenario)
harmonized_co2 = (
    df.filter(variable=harmonized_co2_vars, year=range(2010, 2101, 10))
    .timeseries()
    .groupby(['model', 'scenario'])
    .sum()
)
# Cumulative harmonized CO2 from 2016 up to each scenario's own year of
# peak warming, converted from Mt to Gt (divide by 1000)
baseyear = 2016


def _cumulate_to_peak(row):
    # index levels 0-1 of `row.name` are (model, scenario), used to look
    # up the scenario-specific peak-warming year in the metadata
    end = df.meta.loc[row.name[0:2]][peak_year]
    return pyam.cumulative(row, first_year=baseyear, last_year=end) / 1000


peak_harmonized_co2 = harmonized_co2.apply(_cumulate_to_peak, raw=False, axis=1)
stats.add(peak_harmonized_co2, **header_peak,
          subheader='cumulative CO2 emissions (2016 to peak warming, harmonized) [GtCO2]')
# Cumulative harmonized CO2 over the full century (2016-2100), Mt -> Gt
eoc_harmonized_co2 = harmonized_co2.apply(
    lambda row: pyam.cumulative(row, first_year=baseyear, last_year=2100) / 1000,
    raw=False, axis=1)
stats.add(eoc_harmonized_co2, **header_2100,
          subheader='cumulative CO2 emissions (2016-2100, harmonized) [GtCO2]')
# Probability of exceeding 1.5/2.0/2.5 °C during the century.
# BUG FIX: `ex_prob` was initialized as a dict but then rebound to a
# DataFrame on every loop iteration, so the dict was dead code and only
# the last threshold's data survived the loop. Store each threshold's
# timeseries under its own key instead.
ex_prob = {}
for t in [1.5, 2.0, 2.5]:
    v = 'AR5 climate diagnostics|Temperature|Exceedance Probability|{} °C|MAGICC6'.format(t)
    sh = 'Exceedance Probability {} [%]'.format(t)
    # convert fraction to percent
    ex_prob[t] = df.filter(variable=v).timeseries() * 100
    add_indicator(stats, ex_prob[t], sh)
Overshoot severity¶
# Header group for the overshoot indicators
header_overshoot = {'header': 'Geophysical Characteristics of the Temperature Overshoot'}

# Median global-mean temperature pathway used for the overshoot analysis
variable = 'AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|MED'
mean_temperature = df.filter(variable=variable).timeseries()
def exceedance(temperature, threshold):
    """Characterize the overshoot of `temperature` above `threshold`.

    Returns a four-element list:
    [exceedance year, return year, overshoot duration (years),
     overshoot severity (temperature-years above the threshold)].
    Elements are np.nan where the pathway never crosses (or never
    returns below) the threshold.
    """
    crossings = pyam.cross_threshold(temperature, threshold)
    # first crossing is the exceedance, second (if any) the return
    exceedance_yr = crossings[0] if len(crossings) else np.nan
    return_yr = crossings[1] if len(crossings) > 1 else np.nan

    duration = return_yr - exceedance_yr
    if np.isnan(duration):
        severity = np.nan
    else:
        # integral of temperature over the overshoot window minus the
        # threshold contribution for each of the (duration + 1) years
        severity = (pyam.cumulative(temperature, exceedance_yr, return_yr)
                    - (duration + 1) * threshold)

    return [exceedance_yr, return_yr, duration, severity]
# The 1.5 °C and 2.0 °C stanzas below were verbatim copy-paste duplicates;
# factor the shared logic into one helper.
def _exceedance_table(threshold):
    """Apply `exceedance` to every pathway in `mean_temperature` and
    return the results as a labelled DataFrame (one row per pathway)."""
    rows = [pd.DataFrame(exceedance(mean_temperature.loc[i], threshold)).T
            for i in mean_temperature.index]
    table = pd.concat(rows)
    table.index = mean_temperature.index
    table.columns = ['Exceedance year', 'Return year',
                     'Overshoot years', 'Overshoot severity']
    return table


ex_years_15 = _exceedance_table(1.5)
ex_years_20 = _exceedance_table(2.0)
# Register the overshoot indicators; the row order here fixes the
# column order of the published table (note: the report includes
# severity for 1.5°C only).
overshoot_rows = [
    (ex_years_15, 'Exceedance year', 'Exceedance year|1.5°C [year]'),
    (ex_years_20, 'Exceedance year', 'Exceedance year|2.0°C [year]'),
    (ex_years_15, 'Overshoot years', 'Overshoot duration|1.5°C [number of years]'),
    (ex_years_20, 'Overshoot years', 'Overshoot duration|2.0°C [number of years]'),
    (ex_years_15, 'Overshoot severity', 'Overshoot severity|1.5°C [temperature-years]'),
]
for table, column, label in overshoot_rows:
    stats.add(table[column], **header_overshoot, subheader=label)
Display and export summary statistics to xlsx
¶
# Summarize across scenarios: median with interquartile range, formatted
# to one decimal place
summary = stats.summarize(center='median', interquartile=True, custom_format='{:.1f}')
summary  # display the table in the notebook
summary.to_excel('output/table_2.SM.12_geophysical_characteristics.xlsx')