Move styling cell to bottom of notebooks

pull/10/head
Tim Skov Jacobsen 2019-12-03 09:57:24 +01:00
parent 2fb8441e91
commit e0a0661317
16 changed files with 9207 additions and 9072 deletions

File diff suppressed because one or more lines are too long

View File

@ -1,9 +1,367 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 2. Exercise solutions\n",
"There are often many ways to do the same thing, so these are just one way of solving the problems. \n",
"\n",
"# Exercise 1"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"First element of L1 is 10 and last elements of L1 is 60\n"
]
}
],
"source": [
"L1 = [10, 20, 30, 40, 50, 60]\n",
"first_elem = L1[0]\n",
"last_elem = L1[-1]\n",
"print(f'First element of L1 is {first_elem} and last elements of L1 is {last_elem}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 2"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[20, 30, 40]\n"
]
}
],
"source": [
"L1_sliced = L1[1:4]\n",
"print(L1_sliced)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 3"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Hi!', 'Hello', 'hey', 'Hey', 'Hi']\n"
]
}
],
"source": [
"L2 = ['Hi', 'Hello', 'Hi!', 'Hey', 'Hi', 'hey', 'Hey']\n",
"L2_unique = list(set(L2))\n",
"print(L2_unique)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 4"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"122\n"
]
}
],
"source": [
"d = {2: 122, 3: 535, 't': 'T', 'rom': 'cola'}\n",
"print(d[2]) # Print value corresponding to key 2"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"T\n"
]
}
],
"source": [
"print(d['t']) # Print value corresponding to key 't'"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"I like rom and cola, but mostly the rom\n"
]
}
],
"source": [
"print(f\"I like rom and {d['rom']}, but mostly the rom\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 5"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"23 squared is 529\n",
"73 squared is 5329\n",
"12 squared is 144\n",
"84 squared is 7056\n"
]
}
],
"source": [
"n = [23, 73, 12, 84]\n",
"\n",
"# By using conventional for loop\n",
"for elem in n:\n",
" print(f'{elem} squared is {elem**2}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 6"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[78.64750000000001, 113.25240000000001, 201.3376, 314.59000000000003, 491.546875, 805.3504]\n"
]
}
],
"source": [
"diameters = [10, 12, 16, 20, 25, 32]\n",
"areas = [3.1459 * dia**2 / 4 for dia in diameters] # To use pi the math module would need to be imported\n",
"print(areas)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['79', '113', '201', '315', '492', '805']\n"
]
}
],
"source": [
"# Convert elements to strings and round down\n",
"print([f'{area:.0f}' for area in areas])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 7"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Alpha', 'Bravo', 'Delta']\n"
]
}
],
"source": [
"phonetic_alphabet = ['Alpha', 'Bravo', 'Charlie', 'Delta', 'Echo', 'Foxtrot']\n",
"words_of_5_chars = [word for word in phonetic_alphabet if len(word) == 5]\n",
"print(words_of_5_chars)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 8"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'HE210A', 'HE210B', 'HE200A'}\n"
]
}
],
"source": [
"s1 = {'HE170B', 'HE210B', 'HE190A', 'HE200A', 'HE210A', 'HE210A'}\n",
"s2 = {'HE200A', 'HE210A', 'HE240A', 'HE200A', 'HE210B', 'HE340A'}\n",
"s_intersection = s1.intersection(s2)\n",
"print(s_intersection)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 9"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[125, 435, 362, 156, 80, 435, 435]\n"
]
}
],
"source": [
"rebar_stresses = (125, 501, 362, 156, 80, 475, 489)\n",
"fy = 435\n",
"sigma_s = [stress if stress <= 435 else fy for stress in rebar_stresses]\n",
"print(sigma_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 10"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-18, -25, 0, -21, -15, 0]\n"
]
}
],
"source": [
"T1 = (-18, -27, 2, -21, -15, 5)\n",
"\n",
"T2 = []\n",
"for val in T1:\n",
" if val > 0:\n",
" T2.append(0)\n",
" elif 0 > val > -25:\n",
" T2.append(val)\n",
" else:\n",
" T2.append(-25)\n",
"\n",
"print(T2)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-18, -25, 0, -21, -15, 0]\n"
]
}
],
"source": [
"# Alternative by list comprehension with chained if's\n",
"T3 = [0 if val > 0 else val if 0 > val > -25 else -25 for val in T1]\n",
"print(T3)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# End of exercises\n",
"\n",
"*The cell below is for setting the style of this document. It's not part of the exercises.*"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
@ -275,7 +633,7 @@
"<IPython.core.display.HTML object>"
]
},
"execution_count": 1,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@ -284,355 +642,6 @@
"from IPython.display import HTML\n",
"HTML('<style>{}</style>'.format(open('../css/cowi.css').read()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 2. Exercise solutions\n",
"There are often many ways to do the same thing, so these are just one way of solving the problems. \n",
"\n",
"# Exercise 1"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"First element of L1 is 10 and last elements of L1 is 60\n"
]
}
],
"source": [
"L1 = [10, 20, 30, 40, 50, 60]\n",
"first_elem = L1[0]\n",
"last_elem = L1[-1]\n",
"print(f'First element of L1 is {first_elem} and last elements of L1 is {last_elem}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 2"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[20, 30, 40]\n"
]
}
],
"source": [
"L1_sliced = L1[1:4]\n",
"print(L1_sliced)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 3"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['hey', 'Hello', 'Hi!', 'Hi', 'Hey']\n"
]
}
],
"source": [
"L2 = ['Hi', 'Hello', 'Hi!', 'Hey', 'Hi', 'hey', 'Hey']\n",
"L2_unique = list(set(L2))\n",
"print(L2_unique)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 4"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"122\n"
]
}
],
"source": [
"d = {2: 122, 3: 535, 't': 'T', 'rom': 'cola'}\n",
"print(d[2]) # Print value corresponding to key 2"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"T\n"
]
}
],
"source": [
"print(d['t']) # Print value corresponding to key 't'"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"I like rom and cola, but mostly the rom\n"
]
}
],
"source": [
"print(f\"I like rom and {d['rom']}, but mostly the rom\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 5"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"23 squared is 529\n",
"73 squared is 5329\n",
"12 squared is 144\n",
"84 squared is 7056\n"
]
}
],
"source": [
"n = [23, 73, 12, 84]\n",
"\n",
"# By using conventional for loop\n",
"for elem in n:\n",
" print(f'{elem} squared is {elem**2}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 6"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[78.64750000000001, 113.25240000000001, 201.3376, 314.59000000000003, 491.546875, 805.3504]\n"
]
}
],
"source": [
"diameters = [10, 12, 16, 20, 25, 32]\n",
"areas = [3.1459 * dia**2 / 4 for dia in diameters] # To use pi the math module would need to be imported\n",
"print(areas)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['79', '113', '201', '315', '492', '805']\n"
]
}
],
"source": [
"# Convert elements to strings and round down\n",
"print([f'{area:.0f}' for area in areas])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 7"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Alpha', 'Bravo', 'Delta']\n"
]
}
],
"source": [
"phonetic_alphabet = ['Alpha', 'Bravo', 'Charlie', 'Delta', 'Echo', 'Foxtrot']\n",
"words_of_5_chars = [word for word in phonetic_alphabet if len(word) == 5]\n",
"print(words_of_5_chars)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 8"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'HE210A', 'HE200A', 'HE210B'}\n"
]
}
],
"source": [
"s1 = {'HE170B', 'HE210B', 'HE190A', 'HE200A', 'HE210A', 'HE210A'}\n",
"s2 = {'HE200A', 'HE210A', 'HE240A', 'HE200A', 'HE210B', 'HE340A'}\n",
"s_intersection = s1.intersection(s2)\n",
"print(s_intersection)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 9"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[125, 435, 362, 156, 80, 435, 435]\n"
]
}
],
"source": [
"rebar_stresses = (125, 501, 362, 156, 80, 475, 489)\n",
"fy = 435\n",
"sigma_s = [stress if stress <= 435 else fy for stress in rebar_stresses]\n",
"print(sigma_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 10"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-18, -25, 0, -21, -15, 0]\n"
]
}
],
"source": [
"T1 = (-18, -27, 2, -21, -15, 5)\n",
"\n",
"T2 = []\n",
"for val in T1:\n",
" if val > 0:\n",
" T2.append(0)\n",
" elif 0 > val > -25:\n",
" T2.append(val)\n",
" else:\n",
" T2.append(-25)\n",
"\n",
"print(T2)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-18, -25, 0, -21, -15, 0]\n"
]
}
],
"source": [
"# Alternative by list comprehension with chained if's\n",
"T3 = [0 if val > 0 else val if 0 > val > -25 else -25 for val in T1]\n",
"print(T3)"
]
}
],
"metadata": {
@ -652,7 +661,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
"version": "3.7.4"
},
"latex_envs": {
"LaTeX_envs_menu_present": true,

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,200 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 6. Exercise \n",
"\n",
"## Intro\n",
"This exercise is taken from a project example where shear forces in a shell element from a Sofistik Finite Element calculation are extracted and plotted into one figure per Construction Stage. \n",
"\n",
"The purpose of this procedure to give a quick overview of the results after a calculation has finished, and to be able to flip through the Construction Stages to easily compare them. \n",
"\n",
"There are in total 56 Construction Stages in the dataset used and three different shear keys, resulting in 168 plots.\n",
"\n",
"Each plot will look something like this: \n",
"\n",
"![title](BS301_LC4069.png)\n",
"\n",
"Some plots will be almost empty as loads are close to zero in some Stages. \n",
"\n",
"The dataset is called `shear_keys_base_slab_v20.txt` and can be found in the Session 6 folder for the course. \n",
"\n",
"> **Note:** Understanding the structural context of the dataset is not important for solving the exercise. The same concepts could be used for all other types of datasets. \n",
"\n",
"## The exercise\n",
"As stated, this little program was originally used on a project. The general structure of the script is given and provides the basis for the exercise. Many code lines have been removed and the exercise consists of filling them in again.\n",
"\n",
"All code comments from the original script have been retained as guidance through the exercise. \n",
"\n",
"The problem is partly about reading and understanding already written code and partly about writing code yourself. \n",
"\n",
"Reading other people's code plays a big role when collaboration on programming projects, and it's sometimes harder than writing the code yourself. Thus, it's a good exercise to get some exposure to this.\n",
"\n",
"Before starting, open the dataset file `shear_keys_base_slab_v20.txt` and take a brief look to get a feel for what you are working with. \n",
"\n",
"Copy this directly into your editor to use as a guide through the exercise. \n",
"\n",
"---\n",
"~~~python\n",
"# Import libraries \n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# Set style for matplotlib plots\n",
"plt.style.use('seaborn-whitegrid') \n",
"\n",
"# Dictionary for mapping node numbers to user chosen shear key names\n",
"shear_keys = { \n",
" # Shear key in Base Slab 101\n",
" 'BS101': range(10101, 10199), \n",
" \n",
" # Shear key in Base Slab 201\n",
" 'BS201': range(20101, 20199), \n",
" \n",
" # Shear key in Base Slab 301 \n",
" 'BS301': range(30101, 30214), \n",
"} \n",
" \n",
"# Set name of file that contains the dataset\n",
"file_name = 'shear_keys_base_slab_v20.txt'\n",
"\n",
"# Read dataset from text file into dataframe, save it as 'df'\n",
" # <Code here!>\n",
"\n",
"# Extract version number from file name as 'vXX'\n",
"# (assume the last 6 characters will always be '...vXX.txt')\n",
" # <Code here!>\n",
"\n",
"# Print the head of the dataframe to check it\n",
" # <Code here!>\n",
"\n",
"# Construct a dictionary that maps load case numbers to titles (dict auto removes duplicates)\n",
"lc_no_to_title_map = dict(zip(df['LC'], df['LC-title'])) \n",
" \n",
"# Loop over all shear key names and their corresponding node numbers \n",
"for shear_key, nodes in shear_keys.items():\n",
" \n",
" # Loop over all load cases, create plots and save them to a png-file\n",
" for lc in df['LC'].unique():\n",
"\n",
" # Get title of current load case from mapping dictionary\n",
" # <Code here!> (see hint 1 below)\n",
" \n",
" # Filter dataframe based on load case and nodes in shear key\n",
" # <Code here!> (see hint 2 below)\n",
" \n",
" # Create figure\n",
" # <Code here!> \n",
" \n",
" # Create x-values for plot as numbers running from 1 to length of y-values\n",
" # <Code here!> \n",
" \n",
" # Create y-values for plot as shear forces vx\n",
" # <Code here!> \n",
" \n",
" # Extract indices where y-values are negative and positive, respectively\n",
" idx_neg = np.where(y<0)\n",
" idx_pos = np.where(y>=0)\n",
" \n",
" # Extract x-values where y-values are negative and positive, respectively\n",
" x_neg, x_pos = np.take(x, idx_neg)[0], np.take(x, idx_pos)[0]\n",
" \n",
" # Extract y-values where y-values are negative and positive, respectively\n",
" y_neg, y_pos = np.take(y, idx_neg)[0], np.take(y, idx_pos)[0]\n",
" \n",
" # Plot points for negative and positve values as two separate lines\n",
" # <Code here!> \n",
" \n",
" # Fill between y=0 and the lines where y-values are negative and positive, respectively \n",
" # <Code here!> \n",
" \n",
" # Set titles and x- and y-labels\n",
" # <Code here!> \n",
" \n",
" # Save figure to png-file with meaningful name that varies in every loop\n",
" # <Code here!>\n",
"~~~\n",
"---\n",
"### The hints below refer to the comments in the code above.\n",
"\n",
"* **Hint 1:** The dictionary `lc_no_to_title_map` has load case numbers as keys and the corresponding titles as values. Use this to get the load case title from inside the loop.\n",
"\n",
"* **Hint 2:** Be sure to save the filtered DataFrame to a new variable. If it is saved to a variable of the same name it will be mutated in every loop and quickly end up empty. \n",
"\n",
"### Looping over dictionary items\n",
"The outer loop the iterates over the key/value pairs of the dictionary called `shear_keys`. The key/value pairs in a dictionary are referred to as its **items**.\n",
"\n",
"`shear_keys.items()` returns the key and value in each loop:\n",
"\n",
"---\n",
"```python\n",
"for key, value in shear_keys.items():\n",
" print(key, value)\n",
"```\n",
"would print:\n",
"\n",
"```\n",
"BS101 range(10101, 10199)\n",
"BS201 range(20101, 20199)\n",
"BS301 range(30101, 30214)\n",
"```\n",
"----\n",
"\n",
"This functionality is equivalent to using `zip()` with lists of keys and values as arguments:\n",
"\n",
"```python\n",
"for key, value in zip(shear_keys.keys(), shear_keys.values()):\n",
" print(key, value)\n",
"```\n",
"which would print exactly the same. \n",
"The built-in dictionary class in Python just has a method for creating this common type of iteration so it is more readable using `dict.items()`.\n",
"\n",
"### Some improvements\n",
"\n",
"* Comparison between the plots when flipping through them could be improved by having the same limits for the y-axis on all plots. This can be set by `ax.set_ylim(bottom_limit, top_limit)`. If any of them are left undefined they will be auto adjusted by default.\n",
"\n",
"Source: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.set_ylim.html\n",
"\n",
"\n",
"* The function below can find the indices of the peak values, which can be used to annotate the key points to make the plot easier to read. \n",
"\n",
"---\n",
"~~~python\n",
"def find_local_extrema(y_curve):\n",
" '''\n",
" Return indices of all local extrema for the given sequence of values. Indices are sorted in\n",
" ascending format with no distinction between local maximum and minimum.\n",
" '''\n",
" local_max, _ = find_peaks(y_curve, height=0)\n",
" local_min, _ = find_peaks(-y_curve, height=0)\n",
" return sorted( np.append(local_min, local_max) ) \n",
"~~~\n",
"---\n",
"\n",
"Prior to running the function, `find_peaks` from the `scipy` library must be imported: `from scipy.signal import find_peaks`\n",
"\n",
"After having found the extrema values, they can be annotated like so:\n",
"\n",
"---\n",
"~~~python\n",
"for extr_val in extrema_values:\n",
" ax.annotate(f'{y[extr_val]:.0f}', xy=(x[extr_val], y[extr_val]), xytext=(x[extr_val], y[extr_val]))\n",
"~~~\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# End of exercises\n",
"\n",
"*The cell below is for setting the style of this document. It's not part of the exercises.*"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -284,192 +479,6 @@
"from IPython.display import HTML\n",
"HTML('<style>{}</style>'.format(open('../css/cowi.css').read()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 6. Exercise \n",
"\n",
"## Intro\n",
"This exercise is taken from a project example where shear forces in a shell element from a Sofistik Finite Element calculation are extracted and plotted into one figure per Construction Stage. \n",
"\n",
"The purpose of this procedure to give a quick overview of the results after a calculation has finished, and to be able to flip through the Construction Stages to easily compare them. \n",
"\n",
"There are in total 56 Construction Stages in the dataset used and three different shear keys, resulting in 168 plots.\n",
"\n",
"Each plot will look something like this: \n",
"\n",
"![title](BS301_LC4069.png)\n",
"\n",
"Some plots will be almost empty as loads are close to zero in some Stages. \n",
"\n",
"The dataset is called `shear_keys_base_slab_v20.txt` and can be found in the Session 6 folder for the course. \n",
"\n",
"> **Note:** Understanding the structural context of the dataset is not important for solving the exercise. The same concepts could be used for all other types of datasets. \n",
"\n",
"## The exercise\n",
"As stated, this little program was originally used on a project. The general structure of the script is given and provides the basis for the exercise. Many code lines have been removed and the exercise consists of filling them in again.\n",
"\n",
"All code comments from the original script have been retained as guidance through the exercise. \n",
"\n",
"The problem is partly about reading and understanding already written code and partly about writing code yourself. \n",
"\n",
"Reading other people's code plays a big role when collaboration on programming projects, and it's sometimes harder than writing the code yourself. Thus, it's a good exercise to get some exposure to this.\n",
"\n",
"Before starting, open the dataset file `shear_keys_base_slab_v20.txt` and take a brief look to get a feel for what you are working with. \n",
"\n",
"Copy this directly into your editor to use as a guide through the exercise. \n",
"\n",
"---\n",
"~~~python\n",
"# Import libraries \n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# Set style for matplotlib plots\n",
"plt.style.use('seaborn-whitegrid') \n",
"\n",
"# Dictionary for mapping node numbers to user chosen shear key names\n",
"shear_keys = { \n",
" # Shear key in Base Slab 101\n",
" 'BS101': range(10101, 10199), \n",
" \n",
" # Shear key in Base Slab 201\n",
" 'BS201': range(20101, 20199), \n",
" \n",
" # Shear key in Base Slab 301 \n",
" 'BS301': range(30101, 30214), \n",
"} \n",
" \n",
"# Set name of file that contains the dataset\n",
"file_name = 'shear_keys_base_slab_v20.txt'\n",
"\n",
"# Read dataset from text file into dataframe, save it as 'df'\n",
" # <Code here!>\n",
"\n",
"# Extract version number from file name as 'vXX'\n",
"# (assume the last 6 characters will always be '...vXX.txt')\n",
" # <Code here!>\n",
"\n",
"# Print the head of the dataframe to check it\n",
" # <Code here!>\n",
"\n",
"# Construct a dictionary that maps load case numbers to titles (dict auto removes duplicates)\n",
"lc_no_to_title_map = dict(zip(df['LC'], df['LC-title'])) \n",
" \n",
"# Loop over all shear key names and their corresponding node numbers \n",
"for shear_key, nodes in shear_keys.items():\n",
" \n",
" # Loop over all load cases, create plots and save them to a png-file\n",
" for lc in df['LC'].unique():\n",
"\n",
" # Get title of current load case from mapping dictionary\n",
" # <Code here!> (see hint 1 below)\n",
" \n",
" # Filter dataframe based on load case and nodes in shear key\n",
" # <Code here!> (see hint 2 below)\n",
" \n",
" # Create figure\n",
" # <Code here!> \n",
" \n",
" # Create x-values for plot as numbers running from 1 to length of y-values\n",
" # <Code here!> \n",
" \n",
" # Create y-values for plot as shear forces vx\n",
" # <Code here!> \n",
" \n",
" # Extract indices where y-values are negative and positive, respectively\n",
" idx_neg = np.where(y<0)\n",
" idx_pos = np.where(y>=0)\n",
" \n",
" # Extract x-values where y-values are negative and positive, respectively\n",
" x_neg, x_pos = np.take(x, idx_neg)[0], np.take(x, idx_pos)[0]\n",
" \n",
" # Extract y-values where y-values are negative and positive, respectively\n",
" y_neg, y_pos = np.take(y, idx_neg)[0], np.take(y, idx_pos)[0]\n",
" \n",
" # Plot points for negative and positve values as two separate lines\n",
" # <Code here!> \n",
" \n",
" # Fill between y=0 and the lines where y-values are negative and positive, respectively \n",
" # <Code here!> \n",
" \n",
" # Set titles and x- and y-labels\n",
" # <Code here!> \n",
" \n",
" # Save figure to png-file with meaningful name that varies in every loop\n",
" # <Code here!>\n",
"~~~\n",
"---\n",
"### The hints below refer to the comments in the code above.\n",
"\n",
"* **Hint 1:** The dictionary `lc_no_to_title_map` has load case numbers as keys and the corresponding titles as values. Use this to get the load case title from inside the loop.\n",
"\n",
"* **Hint 2:** Be sure to save the filtered DataFrame to a new variable. If it is saved to a variable of the same name it will be mutated in every loop and quickly end up empty. \n",
"\n",
"### Looping over dictionary items\n",
"The outer loop the iterates over the key/value pairs of the dictionary called `shear_keys`. The key/value pairs in a dictionary are referred to as its **items**.\n",
"\n",
"`shear_keys.items()` returns the key and value in each loop:\n",
"\n",
"---\n",
"```python\n",
"for key, value in shear_keys.items():\n",
" print(key, value)\n",
"```\n",
"would print:\n",
"\n",
"```\n",
"BS101 range(10101, 10199)\n",
"BS201 range(20101, 20199)\n",
"BS301 range(30101, 30214)\n",
"```\n",
"----\n",
"\n",
"This functionality is equivalent to using `zip()` with lists of keys and values as arguments:\n",
"\n",
"```python\n",
"for key, value in zip(shear_keys.keys(), shear_keys.values()):\n",
" print(key, value)\n",
"```\n",
"which would print exactly the same. \n",
"The built-in dictionary class in Python just has a method for creating this common type of iteration so it is more readable using `dict.items()`.\n",
"\n",
"### Some improvements\n",
"\n",
"* Comparison between the plots when flipping through them could be improved by having the same limits for the y-axis on all plots. This can be set by `ax.set_ylim(bottom_limit, top_limit)`. If any of them are left undefined they will be auto adjusted by default.\n",
"\n",
"Source: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.set_ylim.html\n",
"\n",
"\n",
"* The function below can find the indices of the peak values, which can be used to annotate the key points to make the plot easier to read. \n",
"\n",
"---\n",
"~~~python\n",
"def find_local_extrema(y_curve):\n",
" '''\n",
" Return indices of all local extrema for the given sequence of values. Indices are sorted in\n",
" ascending format with no distinction between local maximum and minimum.\n",
" '''\n",
" local_max, _ = find_peaks(y_curve, height=0)\n",
" local_min, _ = find_peaks(-y_curve, height=0)\n",
" return sorted( np.append(local_min, local_max) ) \n",
"~~~\n",
"---\n",
"\n",
"Prior to running the function, `find_peaks` from the `scipy` library must be imported: `from scipy.signal import find_peaks`\n",
"\n",
"After having found the extrema values, they can be annotated like so:\n",
"\n",
"---\n",
"~~~python\n",
"for extr_val in extrema_values:\n",
" ax.annotate(f'{y[extr_val]:.0f}', xy=(x[extr_val], y[extr_val]), xytext=(x[extr_val], y[extr_val]))\n",
"~~~\n",
"---"
]
}
],
"metadata": {
@ -489,7 +498,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
"version": "3.7.4"
},
"latex_envs": {
"LaTeX_envs_menu_present": true,

View File

@ -1,8 +1,317 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 6. Exercise solution"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"code_folding": []
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>LC</th>\n",
" <th>LC-title</th>\n",
" <th>NR</th>\n",
" <th>NG</th>\n",
" <th>vx[kN/m]</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20101</td>\n",
" <td>201</td>\n",
" <td>2.33</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20102</td>\n",
" <td>201</td>\n",
" <td>1.29</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20103</td>\n",
" <td>201</td>\n",
" <td>0.87</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20104</td>\n",
" <td>201</td>\n",
" <td>0.74</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20105</td>\n",
" <td>201</td>\n",
" <td>0.64</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" LC LC-title NR NG vx[kN/m]\n",
"0 4031 Shear keys - BS - 2553/2554 20101 201 2.33\n",
"1 4031 Shear keys - BS - 2553/2554 20102 201 1.29\n",
"2 4031 Shear keys - BS - 2553/2554 20103 201 0.87\n",
"3 4031 Shear keys - BS - 2553/2554 20104 201 0.74\n",
"4 4031 Shear keys - BS - 2553/2554 20105 201 0.64"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Import libraries \n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# Set style for matplotlib plots\n",
"plt.style.use('seaborn-whitegrid') \n",
"\n",
"# Dictionary for mapping node numbers to user chosen shear key names\n",
"shear_keys = { \n",
" # Shear key in Base Slab 101\n",
" 'BS101': range(10101, 10199), \n",
"\n",
" # Shear key in Base Slab 201\n",
" 'BS201': range(20101, 20199), \n",
"\n",
" # Shear key in Base Slab 301 \n",
" 'BS301': range(30101, 30214), \n",
"} \n",
"\n",
"# Set file name of dataset\n",
"file_name = 'shear_keys_base_slab_v20.txt'\n",
"\n",
"# Read dataset from text file into dataframe, save it as 'df'\n",
"df = pd.read_csv(file_name)\n",
"\n",
"# Extract version number from file name as 'vXX'\n",
"# (assume the last 6 characters will always be '...vXX.txt')\n",
"version_number = file_name[-7:-4]\n",
"\n",
"# Print the head of the dataframe to check it\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"ename": "FileNotFoundError",
"evalue": "[Errno 2] No such file or directory: 'Plots/v20/BS101_4031.png'",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-2-d22c1bcd002b>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 51\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 52\u001b[0m \u001b[1;31m# Save figure to png-file with meaningful name that varies in every loop\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 53\u001b[1;33m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msavefig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mf'Plots/{version_number}/{shear_key}_{lc}.png'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\lib\\site-packages\\matplotlib\\pyplot.py\u001b[0m in \u001b[0;36msavefig\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 720\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0msavefig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 721\u001b[0m \u001b[0mfig\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgcf\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 722\u001b[1;33m \u001b[0mres\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mfig\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msavefig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 723\u001b[0m \u001b[0mfig\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanvas\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdraw_idle\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# need this if 'transparent=True' to reset colors\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 724\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mres\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\lib\\site-packages\\matplotlib\\figure.py\u001b[0m in \u001b[0;36msavefig\u001b[1;34m(self, fname, transparent, **kwargs)\u001b[0m\n\u001b[0;32m 2178\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpatch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mset_visible\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mframeon\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2179\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2180\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanvas\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mprint_figure\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2181\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2182\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mframeon\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\lib\\site-packages\\matplotlib\\backend_bases.py\u001b[0m in \u001b[0;36mprint_figure\u001b[1;34m(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)\u001b[0m\n\u001b[0;32m 2080\u001b[0m \u001b[0morientation\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0morientation\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2081\u001b[0m \u001b[0mbbox_inches_restore\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0m_bbox_inches_restore\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2082\u001b[1;33m **kwargs)\n\u001b[0m\u001b[0;32m 2083\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2084\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mbbox_inches\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mrestore_bbox\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\lib\\site-packages\\matplotlib\\backends\\backend_agg.py\u001b[0m in \u001b[0;36mprint_png\u001b[1;34m(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)\u001b[0m\n\u001b[0;32m 528\u001b[0m \u001b[0mrenderer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_renderer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 529\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mcbook\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_setattr_cm\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrenderer\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdpi\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfigure\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdpi\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 530\u001b[1;33m \u001b[0mcbook\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen_file_cm\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfilename_or_obj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"wb\"\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfh\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 531\u001b[0m _png.write_png(renderer._renderer, fh,\n\u001b[0;32m 532\u001b[0m self.figure.dpi, metadata=metadata)\n",
"\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\lib\\contextlib.py\u001b[0m in \u001b[0;36m__enter__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;32mdel\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfunc\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 111\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 112\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mnext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 113\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 114\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"generator didn't yield\"\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\lib\\site-packages\\matplotlib\\cbook\\__init__.py\u001b[0m in \u001b[0;36mopen_file_cm\u001b[1;34m(path_or_file, mode, encoding)\u001b[0m\n\u001b[0;32m 445\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mopen_file_cm\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath_or_file\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"r\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 446\u001b[0m \u001b[1;34mr\"\"\"Pass through file objects and context-manage `.PathLike`\\s.\"\"\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 447\u001b[1;33m \u001b[0mfh\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mopened\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mto_filehandle\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath_or_file\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 448\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mopened\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 449\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mfh\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\lib\\site-packages\\matplotlib\\cbook\\__init__.py\u001b[0m in \u001b[0;36mto_filehandle\u001b[1;34m(fname, flag, return_opened, encoding)\u001b[0m\n\u001b[0;32m 430\u001b[0m \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mbz2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mBZ2File\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mflag\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 431\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 432\u001b[1;33m \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mflag\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mencoding\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 433\u001b[0m \u001b[0mopened\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 434\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'seek'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'Plots/v20/BS101_4031.png'"
]
}
],
"source": [
"%%capture\n",
"# %%capture prevent plots from showing as cell output\n",
"# ------------------------------------------------------\n",
"\n",
    "# Construct a dictionary that maps load case numbers to titles (auto removes duplicates)\n",
"lc_no_to_title_map = dict(zip(df['LC'], df['LC-title'])) \n",
"\n",
"# Loop over all shear key names and their corresponding node numbers \n",
"for shear_key, nodes in shear_keys.items():\n",
"\n",
" # Loop over all load cases, create plots and save them to a png-file\n",
" for lc in df['LC'].unique():\n",
"\n",
" # Get title of current load case from mapping dictionary\n",
" lc_title = lc_no_to_title_map[lc]\n",
" \n",
" # Filter dataframe based on load case and nodes in shear key\n",
" df_filtered = df[(df['LC'] == lc) & (df['NR'].isin(nodes))]\n",
" \n",
" # Create figure\n",
" plt.figure(figsize=(12, 5))\n",
" \n",
" # Create x-values for plot as numbers running from 1 to length of y-values\n",
" x = np.array(range(1, len(df_filtered['vx[kN/m]'])+1))\n",
" \n",
" # Create y-values for plot as shear forces vx\n",
" y = df_filtered['vx[kN/m]'].values\n",
" \n",
" # Extract indices where y-values are negative and positive, respectively\n",
" idx_neg = np.where(y<0)\n",
" idx_pos = np.where(y>=0)\n",
"\n",
" # Extract x-values where y-values are negative and positive, respectively\n",
" x_neg, x_pos = np.take(x, idx_neg)[0], np.take(x, idx_pos)[0]\n",
"\n",
" # Extract y-values where y-values are negative and positive, respectively\n",
" y_neg, y_pos = np.take(y, idx_neg)[0], np.take(y, idx_pos)[0]\n",
"\n",
    "    # Plot lines for negative and positive values as two separate lines\n",
" plt.plot(x_neg, y_neg, '.', color='salmon')\n",
" plt.plot(x_pos, y_pos, '.', color='cornflowerblue') \n",
" \n",
" # Fill between y=0 and the lines where y-values are negative and positive, respectively \n",
" plt.fill_between(x, y, where=y<0, color='salmon', alpha=0.25, interpolate=True)\n",
" plt.fill_between(x, y, where=y>=0, color='cornflowerblue', alpha=0.25, interpolate=True)\n",
" \n",
" # Set titles and x- and y-labels\n",
" plt.title(f'Shear force $vx$ [kN/m] for base slab shear key ${shear_key}$' + '\\n' +\n",
" f'{lc_title} $(LC: {lc}) ({version_number})$', fontsize=18)\n",
" plt.xlabel('Points along shear key', fontsize=14)\n",
" plt.ylabel('Slab shear force $vx$ [kN/m]', fontsize=14)\n",
" \n",
" # Save figure to png-file with meaningful name that varies in every loop\n",
" plt.savefig(f'Plots/{version_number}/{shear_key}_{lc}.png')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Explanations to some of the code lines are given below\n",
"\n",
    "* **Line with `df_filtered = ...`:** The dataframe `df_filtered` is unique in every loop, since it is filtered based on the current load case and the nodes in the current shear key. The filtering is done based on the original large dataframe. Every operation from here on out inside the loop must use `df_filtered` and not the original dataframe. For filtering based on the nodes in the shear key `.isin` is used. This is a good way to filter based on values in a list. And the nodes for each shear key are stored in the loop variable `nodes` as a list.\n",
"\n",
"\n",
"* **Line with `x = ...`:** This generates the x-values, which will just be increasing numbers from 1 and up. Note that `range(start, stop)` goes from `start` to `stop-1`. \n",
"\n",
"\n",
    "* **Line with `y = ...`:** Collects the y-values for the plot in the current loop. `df['vx[kN/m]']` extracts a Series, which is a single column, but with index numbers to its left. To get only the column values as an array, we do `df['vx[kN/m]'].values`. Btw, this will also work for dataframes, but will return a 'matrix' array instead of a 'vector' array as for Series.\n",
"\n",
"\n",
    "* **Lines with `plt.plot()`:** The negative and positive points are plotted as two separate data series so they can have different colors. Connecting the points by lines makes the plot look strange after it has been separated, so only points are plotted. \n",
"\n",
"\n",
    "* **Lines with `plt.fill_between()`:** Parameter `alpha` sets the opacity. Parameter `interpolate=True` will make sure the fill is not \"cut off\" near a crossing of the y-axis.\n",
"\n",
"\n",
    "* **Lines with `plt.title()`**: When creating plots by loops, each plot should probably have a unique title that varies with the loop variable(s). When saving to a file this is important in order to not overwrite the plot with the same name in each loop. A convenient way to put variables inside text is by using f-strings. \n",
"\n",
"\n",
    "* **Line with `plt.savefig()`** A subfolder called `Plots` and a subsubfolder with the version number `v20` have to be created before running this. If the folders are not present, `FileNotFoundError` will be raised. Alternatively, the png-files could be saved directly in the same folder as the script. In that case only `'<file_name>.png'` would be necessary as the argument for `plt.savefig()`. By saving in a subfolder whose name depends on the version number given in the name of the original txt file, it is easier to keep track of versions and avoid overwriting files from previous versions. \n",
"\n",
"## Improvements mentioned in exercise text\n",
"\n",
"### y-limits of plot\n",
"Set the y-limits of the plot by `plt.ylim([ymin, ymax])`. \n",
"\n",
    "`ymin` and `ymax` could be determined as the largest occurring magnitude values among all plots. \n",
"\n",
"~~~python \n",
"# Put this line before the loop\n",
"all_loads = df['vx[kN/m]']\n",
"extr_magnitude = max(abs(min(all_loads)), abs(max(all_loads)))\n",
"~~~\n",
"\n",
"~~~python\n",
"# Put this line in each loop before saving the figure\n",
"plt.ylim([-1.1*extr_magnitude, 1.1*extr_magnitude])\n",
"~~~\n",
"\n",
"\n",
"### Annotations of local extrema\n",
"For annotating the local extrema points, define the function `find_local_extrema(y_curve)` from the exercise text somewhere in the script before the loop. \n",
"\n",
    "Afterwards, include the lines below within the for loop. They should be placed somewhere between when the figure is created and when it's saved.\n",
"\n",
"~~~python\n",
"# Find local extrema points of graph\n",
"extrema_indices = find_local_extrema(y)\n",
"\n",
    "# Annotate each point on the plot\n",
    "for extr_idx in extrema_indices:\n",
" ax.annotate(f'{y[extr_idx]:.0f}', xy=(x[extr_idx], y[extr_idx]), xytext=(x[extr_idx], y[extr_idx]))\n",
"~~~\n",
"\n",
    "This annotates the local peaks which helps for readability of the graph. The annotations could be even better than this by ensuring that text does not overlap and by always annotating the two end points of the graph.\n",
"\n",
"Documentation for annotating method: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.annotate.html \n",
"\n",
"### Another improvement\n",
"Instead of having to manually create the subdirectories `Plots` and `/v20`, we could check if they exist and create them if they don't. For this, we could use the built-in `os` module to manipulate the file system.\n",
"\n",
"One way of doing it is this:\n",
"\n",
"---\n",
"```python\n",
"# Check if directory exists\n",
"if not os.path.exists(directory):\n",
" # Create it if it doesn't\n",
" os.makedirs(directory)\n",
"```\n",
"---\n",
"\n",
"Where the variable `directory` is a string with the path to the desired directory.\n",
"\n",
"Make sure that `import os` is stated in the beginning of the script. This is a built-in module, so no installation is needed.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# End of exercises\n",
"\n",
"*The cell below is for setting the style of this document. It's not part of the exercises.*"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
@ -275,7 +584,7 @@
"<IPython.core.display.HTML object>"
]
},
"execution_count": 1,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@ -284,306 +593,6 @@
"from IPython.display import HTML\n",
"HTML('<style>{}</style>'.format(open('../css/cowi.css').read()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 6. Exercise solution"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"code_folding": []
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>LC</th>\n",
" <th>LC-title</th>\n",
" <th>NR</th>\n",
" <th>NG</th>\n",
" <th>vx[kN/m]</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20101</td>\n",
" <td>201</td>\n",
" <td>2.33</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20102</td>\n",
" <td>201</td>\n",
" <td>1.29</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20103</td>\n",
" <td>201</td>\n",
" <td>0.87</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20104</td>\n",
" <td>201</td>\n",
" <td>0.74</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4031</td>\n",
" <td>Shear keys - BS - 2553/2554</td>\n",
" <td>20105</td>\n",
" <td>201</td>\n",
" <td>0.64</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" LC LC-title NR NG vx[kN/m]\n",
"0 4031 Shear keys - BS - 2553/2554 20101 201 2.33\n",
"1 4031 Shear keys - BS - 2553/2554 20102 201 1.29\n",
"2 4031 Shear keys - BS - 2553/2554 20103 201 0.87\n",
"3 4031 Shear keys - BS - 2553/2554 20104 201 0.74\n",
"4 4031 Shear keys - BS - 2553/2554 20105 201 0.64"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Import libraries \n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"# Set style for matplotlib plots\n",
"plt.style.use('seaborn-whitegrid') \n",
"\n",
"# Dictionary for mapping node numbers to user chosen shear key names\n",
"shear_keys = { \n",
" # Shear key in Base Slab 101\n",
" 'BS101': range(10101, 10199), \n",
"\n",
" # Shear key in Base Slab 201\n",
" 'BS201': range(20101, 20199), \n",
"\n",
" # Shear key in Base Slab 301 \n",
" 'BS301': range(30101, 30214), \n",
"} \n",
"\n",
"# Set file name of dataset\n",
"file_name = 'shear_keys_base_slab_v20.txt'\n",
"\n",
"# Read dataset from text file into dataframe, save it as 'df'\n",
"df = pd.read_csv(file_name)\n",
"\n",
"# Extract version number from file name as 'vXX'\n",
"# (assume the last 6 characters will always be '...vXX.txt')\n",
"version_number = file_name[-7:-4]\n",
"\n",
"# Print the head of the dataframe to check it\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"ename": "FileNotFoundError",
"evalue": "[Errno 2] No such file or directory: 'Plots/v20/BS101_4031.png'",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-3-d22c1bcd002b>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 51\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 52\u001b[0m \u001b[1;31m# Save figure to png-file with meaningful name that varies in every loop\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 53\u001b[1;33m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msavefig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mf'Plots/{version_number}/{shear_key}_{lc}.png'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32m~\\Miniconda3\\lib\\site-packages\\matplotlib\\pyplot.py\u001b[0m in \u001b[0;36msavefig\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 687\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0msavefig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 688\u001b[0m \u001b[0mfig\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgcf\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 689\u001b[1;33m \u001b[0mres\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mfig\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msavefig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 690\u001b[0m \u001b[0mfig\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanvas\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdraw_idle\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# need this if 'transparent=True' to reset colors\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 691\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mres\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Miniconda3\\lib\\site-packages\\matplotlib\\figure.py\u001b[0m in \u001b[0;36msavefig\u001b[1;34m(self, fname, frameon, transparent, **kwargs)\u001b[0m\n\u001b[0;32m 2092\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mset_frameon\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mframeon\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2093\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2094\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcanvas\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mprint_figure\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2095\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2096\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mframeon\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Miniconda3\\lib\\site-packages\\matplotlib\\backend_bases.py\u001b[0m in \u001b[0;36mprint_figure\u001b[1;34m(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)\u001b[0m\n\u001b[0;32m 2073\u001b[0m \u001b[0morientation\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0morientation\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2074\u001b[0m \u001b[0mbbox_inches_restore\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0m_bbox_inches_restore\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2075\u001b[1;33m **kwargs)\n\u001b[0m\u001b[0;32m 2076\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2077\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mbbox_inches\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mrestore_bbox\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Miniconda3\\lib\\site-packages\\matplotlib\\backends\\backend_agg.py\u001b[0m in \u001b[0;36mprint_png\u001b[1;34m(self, filename_or_obj, *args, **kwargs)\u001b[0m\n\u001b[0;32m 519\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 520\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mcbook\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_setattr_cm\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrenderer\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdpi\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfigure\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdpi\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 521\u001b[1;33m \u001b[0mcbook\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen_file_cm\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfilename_or_obj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"wb\"\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mfh\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 522\u001b[0m _png.write_png(renderer._renderer, fh,\n\u001b[0;32m 523\u001b[0m self.figure.dpi, metadata=metadata)\n",
"\u001b[1;32m~\\Miniconda3\\lib\\contextlib.py\u001b[0m in \u001b[0;36m__enter__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;32mdel\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfunc\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 111\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 112\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mnext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 113\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 114\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"generator didn't yield\"\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Miniconda3\\lib\\site-packages\\matplotlib\\cbook\\__init__.py\u001b[0m in \u001b[0;36mopen_file_cm\u001b[1;34m(path_or_file, mode, encoding)\u001b[0m\n\u001b[0;32m 405\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mopen_file_cm\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath_or_file\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"r\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 406\u001b[0m \u001b[1;34mr\"\"\"Pass through file objects and context-manage `.PathLike`\\s.\"\"\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 407\u001b[1;33m \u001b[0mfh\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mopened\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mto_filehandle\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath_or_file\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 408\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mopened\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 409\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mfh\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Miniconda3\\lib\\site-packages\\matplotlib\\cbook\\__init__.py\u001b[0m in \u001b[0;36mto_filehandle\u001b[1;34m(fname, flag, return_opened, encoding)\u001b[0m\n\u001b[0;32m 390\u001b[0m \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mbz2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mBZ2File\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mflag\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 391\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 392\u001b[1;33m \u001b[0mfh\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mflag\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mencoding\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mencoding\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 393\u001b[0m \u001b[0mopened\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 394\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'seek'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'Plots/v20/BS101_4031.png'"
]
}
],
"source": [
"%%capture\n",
"# %%capture prevent plots from showing as cell output\n",
"# ------------------------------------------------------\n",
"\n",
"# Contruct a dictionary that maps load case numbers to titles (auto removes duplicates)\n",
"lc_no_to_title_map = dict(zip(df['LC'], df['LC-title'])) \n",
"\n",
"# Loop over all shear key names and their corresponding node numbers \n",
"for shear_key, nodes in shear_keys.items():\n",
"\n",
" # Loop over all load cases, create plots and save them to a png-file\n",
" for lc in df['LC'].unique():\n",
"\n",
" # Get title of current load case from mapping dictionary\n",
" lc_title = lc_no_to_title_map[lc]\n",
" \n",
" # Filter dataframe based on load case and nodes in shear key\n",
" df_filtered = df[(df['LC'] == lc) & (df['NR'].isin(nodes))]\n",
" \n",
" # Create figure\n",
" plt.figure(figsize=(12, 5))\n",
" \n",
" # Create x-values for plot as numbers running from 1 to length of y-values\n",
" x = np.array(range(1, len(df_filtered['vx[kN/m]'])+1))\n",
" \n",
" # Create y-values for plot as shear forces vx\n",
" y = df_filtered['vx[kN/m]'].values\n",
" \n",
" # Extract indices where y-values are negative and positive, respectively\n",
" idx_neg = np.where(y<0)\n",
" idx_pos = np.where(y>=0)\n",
"\n",
" # Extract x-values where y-values are negative and positive, respectively\n",
" x_neg, x_pos = np.take(x, idx_neg)[0], np.take(x, idx_pos)[0]\n",
"\n",
" # Extract y-values where y-values are negative and positive, respectively\n",
" y_neg, y_pos = np.take(y, idx_neg)[0], np.take(y, idx_pos)[0]\n",
"\n",
" # Plot lines for negative and positve values as two separate lines\n",
" plt.plot(x_neg, y_neg, '.', color='salmon')\n",
" plt.plot(x_pos, y_pos, '.', color='cornflowerblue') \n",
" \n",
" # Fill between y=0 and the lines where y-values are negative and positive, respectively \n",
" plt.fill_between(x, y, where=y<0, color='salmon', alpha=0.25, interpolate=True)\n",
" plt.fill_between(x, y, where=y>=0, color='cornflowerblue', alpha=0.25, interpolate=True)\n",
" \n",
" # Set titles and x- and y-labels\n",
" plt.title(f'Shear force $vx$ [kN/m] for base slab shear key ${shear_key}$' + '\\n' +\n",
" f'{lc_title} $(LC: {lc}) ({version_number})$', fontsize=18)\n",
" plt.xlabel('Points along shear key', fontsize=14)\n",
" plt.ylabel('Slab shear force $vx$ [kN/m]', fontsize=14)\n",
" \n",
" # Save figure to png-file with meaningful name that varies in every loop\n",
" plt.savefig(f'Plots/{version_number}/{shear_key}_{lc}.png')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Explanations to some of the code lines are given below\n",
"\n",
"* **Line with `df_filtered = ...`:** The dataframe `df_filtered` is uniqie in every loop, since it is filtered based on the current load case and the nodes in the current shear key. The filtering is done based on the original large dataframe. Every operation from here on out inside the loop must use `df_filtered` and not the original dataframe. For filtering based on the nodes in the shear key `.isin` is used. This is a good way to filter based on values in a list. And the nodes for each shear key is is stored in the loop variable `nodes` as a list.\n",
"\n",
"\n",
"* **Line with `x = ...`:** This generates the x-values, which will just be increasing numbers from 1 and up. Note that `range(start, stop)` goes from `start` to `stop-1`. \n",
"\n",
"\n",
"* **Line with `y = ...`:** Collects the y-values for the plot in the current loop. `df['vx[kN/m']]` extracts a Series, which is a single column, but with index numbers to its left. To get only the column values as an array, we do `df['vx[kN/m']].values`. Btw, this will also work for dataframes, but will return a 'matrix' array instead of a 'vector' array as for Series.\n",
"\n",
"\n",
"* **Lines with `plt.plot()`:** The negative and positive points are plotting as two separate data series so they can have different colors. Connecting the points by lines makes the plot look strange after it has been separated, so only points are plotted. \n",
"\n",
"\n",
"* **Lines with `plt.fillbetween()`:** Parameter `alpha` set the opacity. Parameter `interpolate=True` will make sure the fill is not \"cut off\" near a crossing of the y-axis.\n",
"\n",
"\n",
"* **Lines with for `plt.title()`**: When creating plots by loops the title for each plot should probably have a unique title that varies with the loop variable(s). Then saving to a file this is important in order to not overwrite the plot with the same name in each loop. A convenient way to put variables inside text is by using f-strings. \n",
"\n",
"\n",
"* **Line with `plt.savefig()`** A subfolder called `Plots` and a subsubfolder with the version number `v20` have to be created before running this. It the folders are not present `FileNoteFoundError` will be raised. Alternatively, the png-files could be saved directly in the same folder as the script. In that case only `'<file_name>.png'` would be necessary as the argument for `plt.savefig()`. By saving in a subfolder whose name depends on the version number given in the name original txt file, it is easier to keep track of versions and avoid overwriting files from previous versions. \n",
"\n",
"## Improvements mentioned in exercise text\n",
"\n",
"### y-limits of plot\n",
"Set the y-limits of the plot by `plt.ylim([ymin, ymax])`. \n",
"\n",
"`ymin` and `ymax` could be determined as the largest occuring magnitude values among all plots. \n",
"\n",
"~~~python \n",
"# Put this line before the loop\n",
"all_loads = df['vx[kN/m]']\n",
"extr_magnitude = max(abs(min(all_loads)), abs(max(all_loads)))\n",
"~~~\n",
"\n",
"~~~python\n",
"# Put this line in each loop before saving the figure\n",
"plt.ylim([-1.1*extr_magnitude, 1.1*extr_magnitude])\n",
"~~~\n",
"\n",
"\n",
"### Annotations of local extrema\n",
"For annotating the local extrema points, define the function `find_local_extrema(y_curve)` from the exercise text somewhere in the script before the loop. \n",
"\n",
"Afterwards, include the lines below within the for loop. They should be placed somewhere between when the figure is creates and when it's saved.\n",
"\n",
"~~~python\n",
"# Find local extrema points of graph\n",
"extrema_indices = find_local_extrema(y)\n",
"\n",
"# Annotate each points on the plot\n",
"for extr_idx in extreme_indices:\n",
" ax.annotate(f'{y[extr_idx]:.0f}', xy=(x[extr_idx], y[extr_idx]), xytext=(x[extr_idx], y[extr_idx]))\n",
"~~~\n",
"\n",
"This annotates the local peaks which helps for readability of the graph. The annotations could be even better than this by ensuring that text does not overlap. and by always annotating the two end points of the graph.\n",
"\n",
"Documentation for annotating method: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.annotate.html \n",
"\n",
"### Another improvement\n",
"Instead of having to manually create the subdirectories `Plots` and `/v20`, we could check if they exist and create them if they don't. For this, we could use the built-in `os` module to manipulate the file system.\n",
"\n",
"One way of doing it is this:\n",
"\n",
"---\n",
"```python\n",
"# Check if directory exists\n",
"if not os.path.exists(directory):\n",
" # Create it if it doesn't\n",
" os.makedirs(directory)\n",
"```\n",
"---\n",
"\n",
"Where the variable `directory` is a string with the path to the desired directory.\n",
"\n",
"Make sure that `import os` is stated in the beginning of the script. This is a built-in module, so no installation is needed.\n",
"\n"
]
}
],
"metadata": {
@ -603,7 +612,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
"version": "3.7.4"
},
"latex_envs": {
"LaTeX_envs_menu_present": true,

File diff suppressed because one or more lines are too long

View File

@ -1,5 +1,204 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 8. Interpolation\n",
"\n",
"*If you haven't already installed the packages `numpy`, `pandas`, `xlrd` and `scipy`, please do so.*\n",
"\n",
"This session has no new material per se. It's supposed to combine elements from the previous sessions into a larger exercise. \n",
"\n",
"The exercise is about 3D interpolation. A set of known points $(x_{known}, x_{known}, z_{known})$ have prescribed values and are used as basis for interpolating $z$-values for a large set of points where only $(x, y)$ are known.\n",
"\n",
"For performing the actual interpolation, we call a function from a third party library called `scipy`. It's built on top of of `numpy` and holds many operations used in scientific analysis. \n",
"\n",
"The code originates from a COWI project where the points $(x, y)$ represent node coordinates from a base slab in a Finite Element model, while $z$-coordinates denote settlement values. The known points $(x_{known}, y_{known}, z_{known})$ stem from a detailed geotechnical analysis which could only be performed in a certain amount of points. The settlement values in the remaining points $(x, y)$ were therefore put into the FE-model as imposed displacements by a procedure similar to this. \n",
"\n",
"\n",
"# Exercise 1.1\n",
"Read through the code given in the script below and try to understand what it does and how it does it.\n",
"\n",
"Copy the script to your editor and run it. \n",
"\n",
"Add print statements if you are unsure about how a certain variable looks at any point throughout the code. Remember you can print the first five rows of a DataFrame with `df.head()`. \n",
"\n",
"The script reads two Excel files, i.e. one containing known points and one containing points to be interpolated. \n",
"One limitation of Excel files is that they cannot be read while they are open. If you want to inspect these files while running the script, create a copy. \n",
"\n",
"# Exercise 1.2\n",
"The bulk of the computational work in the script is done by the line:\n",
"\n",
"---\n",
"```python\n",
"settlements_interpolated = griddata(xy_known, settlements_known, (x_nodes, y_nodes), method='cubic')\n",
"```\n",
"---\n",
"This is the `scipy` function that performs the interpolation. Try to read the documentation for it [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html)\n",
"\n",
"The returned value that we save in the variable `settlements_interpolated` is a numpy array. You can see this by `print(type(settlements_interpolated))` which returns: `<class 'numpy.ndarray'>`. \n",
"\n",
"The last part of the given code creates a figure object and an axis object which enables 3D plots.\n",
"\n",
"**Continue the plotting code to add:**\n",
"\n",
"* 3D scatter plot of the known points $(x_{known}, y_{known}, z_{known})$\n",
"\n",
"* 3D scatter plot of the interpolated points $(x, y, z)$\n",
"\n",
"The plots should all be in the same figure and axis. Adding a scatter plot to an axis `ax` can be done as `ax.scatter(...)`.\n",
"\n",
"# Exercise 1.3\n",
"\n",
"As mentioned, this was used in a project for interpolating settlement values to be applied in an FE-model. The FE-software (Sofistik) has a certain input language which the interpolated values needed to be blended into. \n",
"\n",
"In this exercise we will construct the input `.dat`.file that Sofistik can read. \n",
"\n",
">**Note:** This procedure could be used to produce many file types. It's a good way to programmatically create input files to software. This is just one specific example of a use case.\n",
"\n",
"The Sofistik input file we want to create has this syntax:\n",
"\n",
"---\n",
"\n",
"<pre><code>\n",
"+PROG SOFILOAD \n",
"\n",
"LC 25 type 'SL' fact 1.0 facd 0.0 titl 'LT settlement all nodes'\n",
"\n",
" POIN NODE <font color='#1E90FF'>insert first node number</font> WIDE 0 TYPE WZZ <font color='#1E90FF'>insert first interpolated z-value</font>\n",
" ...\n",
" <font color='#1E90FF'><i>one line per pair of node number/z-value</i></font> \n",
" ...\n",
" POIN NODE <font color='#1E90FF'>insert last node number</font> WIDE 0 TYPE WZZ <font color='#1E90FF'>insert last interpolated z-value</font>\n",
" \n",
"END\n",
"</code></pre>\n",
"\n",
"---\n",
"\n",
"The indented block should print all the node/settlement pairs. The three non-indented lines should only appear once. The output file should look like the file `interpolation_output_example.dat` in the folder. Newlines are made by `\\n`.\n",
"\n",
"\n",
"## How to write to files\n",
"To write data to a file we can use something called a **context manager**. Basically, it allows us to open a file and write to it. See code snippet below:\n",
"\n",
"---\n",
"~~~python\n",
"# Use a context manager to open and write ('w') to file\n",
"with open('file_name.dat', 'w') as file:\n",
" \n",
" # The file can from here on out be referred to as file\n",
" file.write(\"This text will be written inside 'file_name.dat'\")\n",
"~~~\n",
"---\n",
"\n",
"By using the concept the file is automatically closed after our indented block is terminated. It also creates the file in case it doesn't already exist. \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# The script\n",
"\n",
"~~~python\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"from scipy.interpolate import griddata\n",
"import matplotlib.pyplot as plt\n",
"from mpl_toolkits.mplot3d import Axes3D\n",
"\n",
"\n",
"# Set name of Excel file to read containing known points\n",
"file_known = 'known_points.xlsx'\n",
"\n",
"# Set name of sheet to read from Excel file\n",
"sheet_known = 'Sheet1'\n",
"\n",
"# Read data from Excel sheet into a dataframe\n",
"df = pd.read_excel(file_known, sheet_name=sheet_known, skiprows=7)\n",
"\n",
"# Extract column names starting with 'Y' into new dataframe of known Y-coords\n",
"df_y = df[df.columns[df.columns.str.startswith('Y')]]\n",
"\n",
"# Extract column names starting with 'Z' into new dataframe of known Z-coords\n",
"df_z_known = df[df.columns[df.columns.str.startswith('Z')]]\n",
"\n",
"# Flatten dataframe values into 1D array (matri format -> vector format)\n",
"y_known = df_y.values.flatten()\n",
"z_known = df_z_known.values.flatten()\n",
"\n",
"# Extract known x-values\n",
"x_known = df['X']\n",
"\n",
"# Create X-array by repeating itself as many times as there are Y-columns\n",
"# This will create matching(x, y)-points between arrays x and y\n",
"x_known = np.repeat(x_known, len(df_y.columns))\n",
"\n",
"# Mirror known y-values and add corresponding x- and y-values\n",
"x_known = np.append(x_known, x_known)\n",
"y_known = np.append(y_known, -y_known)\n",
"z_known = np.append(z_known, z_known)\n",
"\n",
"# Arrange known (x, y) points to fit input for interpolation\n",
"xy_known = np.array(list(zip(x_known, y_known)))\n",
"\n",
"# Set names and read Excel file with nodes to be interpolated\n",
"file_nodes = 'points_to_be_interpolated.xlsx'\n",
"sheet_nodes = 'XLSX-Export'\n",
"df_nodes = pd.read_excel(file_nodes, sheet_name=sheet_nodes)\n",
"\n",
"# Extract x- and y-coordinates of nodes to be interpolated\n",
"x_nodes = df_nodes['X [m]']\n",
"y_nodes = df_nodes['Y [m]']\n",
"\n",
"# Extract node numbers for points to be interpolated\n",
"node_no = df_nodes['NR']\n",
"\n",
"# Perform interpolation calculation\n",
"points_interpolated = griddata(xy_known, z_known, (x_nodes, y_nodes), method='cubic')\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.2 ###\n",
"####################\n",
"# Create figure object\n",
"fig = plt.figure()\n",
"\n",
"# Create axis object for 3D plot\n",
"ax = fig.add_subplot(111, projection='3d')\n",
"\n",
"# Plot known points as 3D scatter plot (ax.scatter(...))\n",
" # <Put plotting code here!>\n",
"\n",
"# Plot interpolated points as 3D scatter plot\n",
" # <Put plotting code here!>\n",
"\n",
"# Show figure\n",
" # <Put plotting code here!>\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.3 ###\n",
"####################\n",
"# Write Sofistik input code to .dat-file for applying the interpolated z-values as \n",
"# imposed displacement load in all points (x, y)\n",
" # <Put code that creates and writes to a .dat file here!> \n",
"\n",
"~~~"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# End of exercises\n",
"\n",
"*The cell below is for setting the style of this document. It's not part of the exercises.*"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -284,196 +483,6 @@
"from IPython.display import HTML\n",
"HTML('<style>{}</style>'.format(open('../css/cowi.css').read()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 8. Interpolation\n",
"\n",
"*If you haven't already installed the packages `numpy`, `pandas`, `xlrd` and `scipy`, please do so.*\n",
"\n",
"This session has no new material per se. It's supposed to combine elements from the previous sessions into a larger exercise. \n",
"\n",
"The exercise is about 3D interpolation. A set of known points $(x_{known}, x_{known}, z_{known})$ have prescribed values and are used as basis for interpolating $z$-values for a large set of points where only $(x, y)$ are known.\n",
"\n",
"For performing the actual interpolation, we call a function from a third party library called `scipy`. It's built on top of of `numpy` and holds many operations used in scientific analysis. \n",
"\n",
"The code originates from a COWI project where the points $(x, y)$ represent node coordinates from a base slab in a Finite Element model, while $z$-coordinates denote settlement values. The known points $(x_{known}, y_{known}, z_{known})$ stem from a detailed geotechnical analysis which could only be performed in a certain amount of points. The settlement values in the remaining points $(x, y)$ were therefore put into the FE-model as imposed displacements by a procedure similar to this. \n",
"\n",
"\n",
"# Exercise 1.1\n",
"Read through the code given in the script below and try to understand what it does and how it does it.\n",
"\n",
"Copy the script to your editor and run it. \n",
"\n",
"Add print statements if you are unsure about how a certain variable looks at any point throughout the code. Remember you can print the first five rows of a DataFrame with `df.head()`. \n",
"\n",
"The script reads two Excel files, i.e. one containing known points and one containing points to be interpolated. \n",
"One limitation of Excel files is that they cannot be read while they are open. If you want to inspect these files while running the script, create a copy. \n",
"\n",
"# Exercise 1.2\n",
"The bulk of the computational work in the script is done by the line:\n",
"\n",
"---\n",
"```python\n",
"settlements_interpolated = griddata(xy_known, settlements_known, (x_nodes, y_nodes), method='cubic')\n",
"```\n",
"---\n",
"This is the `scipy` function that performs the interpolation. Try to read the documentation for it [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html)\n",
"\n",
"The returned value that we save in the variable `settlements_interpolated` is a numpy array. You can see this by `print(type(settlements_interpolated))` which returns: `<class 'numpy.ndarray'>`. \n",
"\n",
"The last part of the given code creates a figure object and an axis object which enables 3D plots.\n",
"\n",
"**Continue the plotting code to add:**\n",
"\n",
"* 3D scatter plot of the known points $(x_{known}, y_{known}, z_{known})$\n",
"\n",
"* 3D scatter plot of the interpolated points $(x, y, z)$\n",
"\n",
"The plots should all be in the same figure and axis. Adding a scatter plot to an axis `ax` can be done as `ax.scatter(...)`.\n",
"\n",
"# Exercise 1.3\n",
"\n",
"As mentioned, this was used in a project for interpolating settlement values to be applied in an FE-model. The FE-software (Sofistik) has a certain input language which the interpolated values needed to be blended into. \n",
"\n",
"In this exercise we will construct the input `.dat`.file that Sofistik can read. \n",
"\n",
">**Note:** This procedure could be used to produce many file types. It's a good way to programmatically create input files to software. This is just one specific example of a use case.\n",
"\n",
"The Sofistik input file we want to create has this syntax:\n",
"\n",
"---\n",
"\n",
"<pre><code>\n",
"+PROG SOFILOAD \n",
"\n",
"LC 25 type 'SL' fact 1.0 facd 0.0 titl 'LT settlement all nodes'\n",
"\n",
" POIN NODE <font color='#1E90FF'>insert first node number</font> WIDE 0 TYPE WZZ <font color='#1E90FF'>insert first interpolated z-value</font>\n",
" ...\n",
" <font color='#1E90FF'><i>one line per pair of node number/z-value</i></font> \n",
" ...\n",
" POIN NODE <font color='#1E90FF'>insert last node number</font> WIDE 0 TYPE WZZ <font color='#1E90FF'>insert last interpolated z-value</font>\n",
" \n",
"END\n",
"</code></pre>\n",
"\n",
"---\n",
"\n",
"The indented block should print all the node/settlement pairs. The three non-indented lines should only appear once. The output file should look like the file `interpolation_output_example.dat` in the folder. Newlines are made by `\\n`.\n",
"\n",
"\n",
"## How to write to files\n",
"To write data to a file we can use something called a **context manager**. Basically, it allows us to open a file and write to it. See code snippet below:\n",
"\n",
"---\n",
"~~~python\n",
"# Use a context manager to open and write ('w') to file\n",
"with open('file_name.dat', 'w') as file:\n",
" \n",
" # The file can from here on out be referred to as file\n",
" file.write(\"This text will be written inside 'file_name.dat'\")\n",
"~~~\n",
"---\n",
"\n",
"By using the concept the file is automatically closed after our indented block is terminated. It also creates the file in case it doesn't already exist. \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# The script\n",
"\n",
"~~~python\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"from scipy.interpolate import griddata\n",
"import matplotlib.pyplot as plt\n",
"from mpl_toolkits.mplot3d import Axes3D\n",
"\n",
"\n",
"# Set name of Excel file to read containing known points\n",
"file_known = 'known_points.xlsx'\n",
"\n",
"# Set name of sheet to read from Excel file\n",
"sheet_known = 'Sheet1'\n",
"\n",
"# Read data from Excel sheet into a dataframe\n",
"df = pd.read_excel(file_known, sheet_name=sheet_known, skiprows=7)\n",
"\n",
"# Extract column names starting with 'Y' into new dataframe of known Y-coords\n",
"df_y = df[df.columns[df.columns.str.startswith('Y')]]\n",
"\n",
"# Extract column names starting with 'Z' into new dataframe of known Z-coords\n",
"df_z_known = df[df.columns[df.columns.str.startswith('Z')]]\n",
"\n",
"# Flatten dataframe values into 1D array (matri format -> vector format)\n",
"y_known = df_y.values.flatten()\n",
"z_known = df_z_known.values.flatten()\n",
"\n",
"# Extract known x-values\n",
"x_known = df['X']\n",
"\n",
"# Create X-array by repeating itself as many times as there are Y-columns\n",
"# This will create matching(x, y)-points between arrays x and y\n",
"x_known = np.repeat(x_known, len(df_y.columns))\n",
"\n",
"# Mirror known y-values and add corresponding x- and y-values\n",
"x_known = np.append(x_known, x_known)\n",
"y_known = np.append(y_known, -y_known)\n",
"z_known = np.append(z_known, z_known)\n",
"\n",
"# Arrange known (x, y) points to fit input for interpolation\n",
"xy_known = np.array(list(zip(x_known, y_known)))\n",
"\n",
"# Set names and read Excel file with nodes to be interpolated\n",
"file_nodes = 'points_to_be_interpolated.xlsx'\n",
"sheet_nodes = 'XLSX-Export'\n",
"df_nodes = pd.read_excel(file_nodes, sheet_name=sheet_nodes)\n",
"\n",
"# Extract x- and y-coordinates of nodes to be interpolated\n",
"x_nodes = df_nodes['X [m]']\n",
"y_nodes = df_nodes['Y [m]']\n",
"\n",
"# Extract node numbers for points to be interpolated\n",
"node_no = df_nodes['NR']\n",
"\n",
"# Perform interpolation calculation\n",
"points_interpolated = griddata(xy_known, z_known, (x_nodes, y_nodes), method='cubic')\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.2 ###\n",
"####################\n",
"# Create figure object\n",
"fig = plt.figure()\n",
"\n",
"# Create axis object for 3D plot\n",
"ax = fig.add_subplot(111, projection='3d')\n",
"\n",
"# Plot known points as 3D scatter plot (ax.scatter(...))\n",
" # <Put plotting code here!>\n",
"\n",
"# Plot interpolated points as 3D scatter plot\n",
" # <Put plotting code here!>\n",
"\n",
"# Show figure\n",
" # <Put plotting code here!>\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.3 ###\n",
"####################\n",
"# Write Sofistik input code to .dat-file for applying the interpolated z-values as \n",
"# imposed displacement load in all points (x, y)\n",
" # <Put code that creates and writes to a .dat file here!> \n",
"\n",
"~~~"
]
}
],
"metadata": {
@ -493,7 +502,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
"version": "3.7.4"
},
"latex_envs": {
"LaTeX_envs_menu_present": true,

View File

@ -1,5 +1,127 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 8. Exercise solution\n",
"\n",
"The full script is provided below."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"~~~python \n",
"import pandas as pd\n",
"import numpy as np\n",
"from scipy.interpolate import griddata\n",
"import matplotlib.pyplot as plt\n",
"from mpl_toolkits.mplot3d import Axes3D\n",
"\n",
"\n",
"# Set name of Excel file to read containing known points\n",
"file_known = 'known_points.xlsx'\n",
"\n",
"# Set name of sheet to read from Excel file\n",
"sheet_known = 'Sheet1'\n",
"\n",
"# Read data from Excel sheet into a dataframe\n",
"df = pd.read_excel(file_known, sheet_name=sheet_known, skiprows=7)\n",
"\n",
"# Extract column names starting with 'Y' into new dataframe of known Y-coords\n",
"df_y = df[df.columns[df.columns.str.startswith('Y')]]\n",
"\n",
"# Extract column names starting with 'Z' into new dataframe of known Z-coords\n",
"df_z_known = df[df.columns[df.columns.str.startswith('Z')]]\n",
"\n",
"# Flatten dataframe values into 1D array (matri format -> vector format)\n",
"y_known = df_y.values.flatten()\n",
"z_known = df_z_known.values.flatten()\n",
"\n",
"# Extract known x-values\n",
"x_known = df['X']\n",
"\n",
"# Create X-array by repeating itself as many times as there are Y-columns\n",
"# This will create matching(x, y)-points between arrays x and y\n",
"x_known = np.repeat(x_known, len(df_y.columns))\n",
"\n",
"# Mirror known y-values and add corresponding x- and y-values\n",
"x_known = np.append(x_known, x_known)\n",
"y_known = np.append(y_known, -y_known)\n",
"z_known = np.append(z_known, z_known)\n",
"\n",
"# Arrange known (x, y) points to fit input for interpolation\n",
"xy_known = np.array(list(zip(x_known, y_known)))\n",
"\n",
"# Set names and read Excel file with nodes to be interpolated\n",
"file_nodes = 'points_to_be_interpolated.xlsx'\n",
"sheet_nodes = 'XLSX-Export'\n",
"df_nodes = pd.read_excel(file_nodes, sheet_name=sheet_nodes)\n",
"\n",
"# Extract x- and y-coordinates of nodes to be interpolated\n",
"x_nodes = df_nodes['X [m]']\n",
"y_nodes = df_nodes['Y [m]']\n",
"\n",
"# Extract node numbers for points to be interpolated\n",
"node_no = df_nodes['NR']\n",
"\n",
"# Perform interpolation calculation\n",
"z_interpolated = griddata(xy_known, z_known, (x_nodes, y_nodes), method='cubic')\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.2 ###\n",
"####################\n",
"# Create figure object\n",
"fig = plt.figure()\n",
"\n",
"# Create axis object for 3D plot\n",
"ax = fig.add_subplot(111, projection='3d')\n",
"\n",
"# Plot known points as 3D scatter plot (ax.scatter(...))\n",
"ax.scatter(x_known, y_known, z_known, '-.', color='limegreen')\n",
"\n",
"# Plot interpolated points as 3D scatter plot\n",
"ax.scatter(x_nodes, y_nodes, z_interpolated,\n",
" '.', color='cornflowerblue', s=0.1)\n",
"\n",
"# Show figure\n",
"plt.show()\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.3 ###\n",
"####################\n",
"# Write Sofistik input code to .dat-file for applying the interpolated z-values as\n",
"# imposed displacement load (settlement) in all points (x, y)\n",
"with open(f'generated_file.dat', 'w') as file:\n",
"\n",
" # Write the 'static' text to file\n",
" file.write('''+PROG SOFILOAD \n",
"\n",
"LC 25 type 'P' fact 1.0 facd 0.0 titl 'LT settlement all nodes' \\n''')\n",
"\n",
" # Write the 'variable' text to file with node number/settlement pairs\n",
" for node, settlement in zip(node_no, z_interpolated):\n",
" file.write(f' POIN NODE {node} WIDE 0 TYPE WZZ {settlement} \\n')\n",
"\n",
" # Write 'static' END statement to file\n",
" file.write('END')\n",
"\n",
"~~~"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# End of exercises\n",
"\n",
"*The cell below is for setting the style of this document. It's not part of the exercises.*"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -284,119 +406,6 @@
"from IPython.display import HTML\n",
"HTML('<style>{}</style>'.format(open('../css/cowi.css').read()))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 8. Exercise solution\n",
"\n",
"The full script is provided below."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"~~~python \n",
"import pandas as pd\n",
"import numpy as np\n",
"from scipy.interpolate import griddata\n",
"import matplotlib.pyplot as plt\n",
"from mpl_toolkits.mplot3d import Axes3D\n",
"\n",
"\n",
"# Set name of Excel file to read containing known points\n",
"file_known = 'known_points.xlsx'\n",
"\n",
"# Set name of sheet to read from Excel file\n",
"sheet_known = 'Sheet1'\n",
"\n",
"# Read data from Excel sheet into a dataframe\n",
"df = pd.read_excel(file_known, sheet_name=sheet_known, skiprows=7)\n",
"\n",
"# Extract column names starting with 'Y' into new dataframe of known Y-coords\n",
"df_y = df[df.columns[df.columns.str.startswith('Y')]]\n",
"\n",
"# Extract column names starting with 'Z' into new dataframe of known Z-coords\n",
"df_z_known = df[df.columns[df.columns.str.startswith('Z')]]\n",
"\n",
"# Flatten dataframe values into 1D array (matrix format -> vector format)\n",
"y_known = df_y.values.flatten()\n",
"z_known = df_z_known.values.flatten()\n",
"\n",
"# Extract known x-values\n",
"x_known = df['X']\n",
"\n",
"# Create X-array by repeating itself as many times as there are Y-columns\n",
"# This will create matching (x, y)-points between arrays x and y\n",
"x_known = np.repeat(x_known, len(df_y.columns))\n",
"\n",
"# Mirror known y-values and add corresponding x- and y-values\n",
"x_known = np.append(x_known, x_known)\n",
"y_known = np.append(y_known, -y_known)\n",
"z_known = np.append(z_known, z_known)\n",
"\n",
"# Arrange known (x, y) points to fit input for interpolation\n",
"xy_known = np.array(list(zip(x_known, y_known)))\n",
"\n",
"# Set names and read Excel file with nodes to be interpolated\n",
"file_nodes = 'points_to_be_interpolated.xlsx'\n",
"sheet_nodes = 'XLSX-Export'\n",
"df_nodes = pd.read_excel(file_nodes, sheet_name=sheet_nodes)\n",
"\n",
"# Extract x- and y-coordinates of nodes to be interpolated\n",
"x_nodes = df_nodes['X [m]']\n",
"y_nodes = df_nodes['Y [m]']\n",
"\n",
"# Extract node numbers for points to be interpolated\n",
"node_no = df_nodes['NR']\n",
"\n",
"# Perform interpolation calculation\n",
"z_interpolated = griddata(xy_known, z_known, (x_nodes, y_nodes), method='cubic')\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.2 ###\n",
"####################\n",
"# Create figure object\n",
"fig = plt.figure()\n",
"\n",
"# Create axis object for 3D plot\n",
"ax = fig.add_subplot(111, projection='3d')\n",
"\n",
"# Plot known points as 3D scatter plot (ax.scatter(...))\n",
"ax.scatter(x_known, y_known, z_known, '-.', color='limegreen')\n",
"\n",
"# Plot interpolated points as 3D scatter plot\n",
"ax.scatter(x_nodes, y_nodes, z_interpolated,\n",
" '.', color='cornflowerblue', s=0.1)\n",
"\n",
"# Show figure\n",
"plt.show()\n",
"\n",
"\n",
"####################\n",
"### Exercise 1.3 ###\n",
"####################\n",
"# Write Sofistik input code to .dat-file for applying the interpolated z-values as\n",
"# imposed displacement load (settlement) in all points (x, y)\n",
"with open(f'generated_file.dat', 'w') as file:\n",
"\n",
" # Write the 'static' text to file\n",
" file.write('''+PROG SOFILOAD \n",
"\n",
"LC 25 type 'P' fact 1.0 facd 0.0 titl 'LT settlement all nodes' \\n''')\n",
"\n",
" # Write the 'variable' text to file with node number/settlement pairs\n",
" for node, settlement in zip(node_no, z_interpolated):\n",
" file.write(f' POIN NODE {node} WIDE 0 TYPE WZZ {settlement} \\n')\n",
"\n",
" # Write 'static' END statement to file\n",
" file.write('END')\n",
"\n",
"~~~"
]
}
],
"metadata": {
@ -416,7 +425,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
"version": "3.7.4"
},
"latex_envs": {
"LaTeX_envs_menu_present": true,

File diff suppressed because one or more lines are too long