1"""Functions to structure data and generate caverns with constraints.
2
3References
4----------
5.. [#Brennan20] Brennan, J. (2020). ‘Fast and easy gridding of point data with
6 geopandas’, 16 March. Available at:
7 https://james-brennan.github.io/posts/fast_gridding_geopandas/
8 (Accessed: 1 January 2024).
9.. [#Williams22] Williams, J. D. O., Williamson, J. P., Parkes, D., Evans, D.
10 J., Kirk, K. L., Sunny, N., Hough, E., Vosper, H., and Akhurst, M. C.
11 (2022). ‘Does the United Kingdom have sufficient geological storage
12 capacity to support a hydrogen economy? Estimating the salt cavern storage
13 potential of bedded halite formations’, Journal of Energy Storage, 53, p.
14 105109. https://doi.org/10.1016/j.est.2022.105109.
15.. [#Chan19] Chan, S. (2019). ‘Left Join – Building a hexagonal cartogram
16 with Python’, 7 September. Available at:
17 https://sabrinadchan.github.io/data-blog/building-a-hexagonal-cartogram.html
18 (Accessed: 1 January 2024).
19.. [#DECC23] Department of the Environment, Climate and Communications (2023).
20 Draft Offshore Renewable Energy Development Plan II (OREDP II). Government
21 of Ireland. Available at:
22 https://www.gov.ie/en/publication/71e36-offshore-renewable-energy-development-plan-ii-oredp-ii/
23 (Accessed: 1 October 2023).
24.. [#NIST16] National Institute of Standards and Technology (2016). ‘Units
25 Outside the SI’, in NIST Guide to the SI. (Special Publication, 811).
26 Available at:
27 https://www.nist.gov/pml/special-publication-811/nist-guide-si-chapter-5-units-outside-si
28 (Accessed: 30 November 2023).
29.. [#RE21] Renewables Ireland (2021). Dublin Array Offshore Wind Farm:
30 Supporting Information Report - Annex D: Marine Archaeology Assessment.
31 Available at:
32 https://assets.gov.ie/202763/9160dd51-b119-44cf-a224-8b5302347e7d.pdf
33 (Accessed: 10 November 2023).
34"""
35
import geopandas as gpd
import numpy as np
import pandas as pd
import shapely
import xarray as xr

from h2ss import data as rd

ROOF_THICKNESS = 80  # salt roof thickness [m]
FLOOR_THICKNESS = 10  # minimum salt floor thickness [m]
CAVERN_DIAMETER = 85  # cavern diameter [m]
CAVERN_SEPARATION = CAVERN_DIAMETER * 4  # cavern separation distance [m]
PILLAR_WIDTH = CAVERN_DIAMETER * 3  # pillar width [m]
NTG_SLOPE = 0.0009251759226446605  # slope of the net-to-gross regression
NTG_INTERCEPT = 0.2616769604617021  # y-intercept of the regression
MAX_NTG = 0.75  # maximum allowed net-to-gross


def net_to_gross(
    dat_xr, slope=NTG_SLOPE, intercept=NTG_INTERCEPT, max_ntg=MAX_NTG
):
    """Estimate the net-to-gross for a given halite thickness.

    Parameters
    ----------
    dat_xr : xarray.Dataset
        Xarray dataset of the halite data
    slope : float
        Slope of the net-to-gross linear regression
    intercept : float
        y-intercept of the net-to-gross linear regression
    max_ntg : float
        Maximum allowed value for the net-to-gross

    Returns
    -------
    xarray.Dataset
        Xarray dataset of the halite with net-to-gross information
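
    Examples
    --------
    A minimal, illustrative sketch using a tiny synthetic dataset; the
    thickness values below are arbitrary:

    >>> import xarray as xr
    >>> ds = xr.Dataset({"Thickness": ("x", [200.0, 400.0, 800.0])})
    >>> ds = net_to_gross(ds)
    >>> ds["NetToGross"].values.round(3)
    array([0.447, 0.632, 0.75 ])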
74 """
75 ntg = slope * dat_xr.Thickness + intercept
76 ntg = xr.where(ntg > max_ntg, max_ntg, ntg)
77 dat_xr = dat_xr.assign(NetToGross=ntg)
78 dat_xr = dat_xr.assign(ThicknessNet=dat_xr.Thickness * dat_xr.NetToGross)
79 return dat_xr
80
81
def zones_of_interest(
    dat_xr,
    constraints,
    roof_thickness=ROOF_THICKNESS,
    floor_thickness=FLOOR_THICKNESS,
    max_ntg=MAX_NTG,
):
    """Generate zones of interest by applying thickness and depth constraints.

    Parameters
    ----------
    dat_xr : xarray.Dataset
        Xarray dataset of the halite data
    constraints : dict[str, float]
        Dictionary containing the following:
        ``"net_height"``: net cavern height [m];
        ``"min_depth"``: minimum cavern depth [m];
        ``"max_depth"``: maximum cavern depth [m]
    roof_thickness : float
        Salt roof thickness [m]
    floor_thickness : float
        Minimum salt floor thickness [m]
    max_ntg : float
        Maximum allowed value for the net-to-gross

    Returns
    -------
    tuple[geopandas.GeoDataFrame, xarray.Dataset]
        A (multi)polygon geodataframe of the zones of interest and an Xarray
        dataset of the zones of interest
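
    Examples
    --------
    A sketch of the expected call, assuming ``ds`` is a halite dataset with
    ``Thickness`` and ``TopDepthSeabed`` variables; the constraint values
    below are illustrative only:

    >>> constraints = {
    ...     "net_height": 120, "min_depth": 500, "max_depth": 2000
    ... }
    >>> zdf, zds = zones_of_interest(ds, constraints)  # doctest: +SKIP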
112 """
113 dat_xr = net_to_gross(dat_xr=dat_xr, max_ntg=max_ntg)
114 zds = dat_xr.where(
115 (
116 (
117 dat_xr.ThicknessNet
118 >= constraints["net_height"] + roof_thickness + floor_thickness
119 )
120 & (
121 dat_xr.TopDepthSeabed
122 >= constraints["min_depth"] - roof_thickness
123 )
124 & (
125 dat_xr.TopDepthSeabed
126 <= constraints["max_depth"] - roof_thickness
127 )
128 ),
129 drop=True,
130 )
131 # zones of interest polygon
132 zdf = (
133 zds.max(dim="halite")["Thickness"]
134 .to_dataframe()
135 .dropna()
136 .reset_index()
137 )
138 zdf = gpd.GeoDataFrame(
139 geometry=gpd.GeoSeries(gpd.points_from_xy(zdf.x, zdf.y))
140 .buffer(100)
141 .envelope,
142 crs=rd.CRS,
143 ).dissolve()
144 return zdf, zds
145
146
def generate_caverns_square_grid(
    dat_extent,
    zones_df,
    diameter=CAVERN_DIAMETER,
    separation=CAVERN_SEPARATION,
):
    """Generate salt caverns using a regular square grid.

    Parameters
    ----------
    dat_extent : geopandas.GeoSeries
        Extent of the data
    zones_df : geopandas.GeoDataFrame
        Zones of interest
    diameter : float
        Diameter of the cavern [m]
    separation : float
        Cavern separation distance [m]

    Returns
    -------
    geopandas.GeoDataFrame
        A polygon geodataframe of potential caverns in the zone of interest

    Notes
    -----
    Gridding method based on [#Brennan20]_.
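
    Examples
    --------
    A sketch of the expected call, assuming ``extent`` is a geoseries of the
    data extent and ``zdf`` is the zones-of-interest geodataframe returned
    by ``zones_of_interest``:

    >>> caverns = generate_caverns_square_grid(
    ...     extent, zdf
    ... )  # doctest: +SKIP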
174 """
175 xmin_, ymin_, xmax_, ymax_ = dat_extent.total_bounds
176
177 # create the cells in a loop
178 grid_cells = []
179 for x0 in np.arange(xmin_, xmax_ + separation, separation):
180 for y0 in np.arange(ymin_, ymax_ + separation, separation):
181 # bounds
182 x1 = x0 - separation
183 y1 = y0 + separation
184 grid_cells.append(shapely.geometry.box(x0, y0, x1, y1))
185 grid_cells = gpd.GeoDataFrame(grid_cells, columns=["geometry"], crs=rd.CRS)
186
187 # generate caverns within the zones of interest
188 cavern_df = gpd.sjoin(
189 gpd.GeoDataFrame(
190 geometry=grid_cells.centroid.buffer(diameter / 2)
191 ).drop_duplicates(),
192 zones_df,
193 predicate="within",
194 )
195
196 cavern_df.drop(columns=["index_right"], inplace=True)
197
198 return cavern_df
199
200
def hexgrid_init(dat_extent, separation):
    """Initialise variables for ``generate_caverns_hexagonal_grid``.

    Parameters
    ----------
    dat_extent : geopandas.GeoSeries
        Extent of the data
    separation : float
        Cavern separation distance [m]

    Returns
    -------
    tuple[float, list[int], list[int]]
        The vertical scaling factor of the hexagonal grid (i.e. sin(60°)),
        and the column and row coordinates of the grid
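
    Examples
    --------
    A minimal, illustrative sketch; the extent and separation values below
    are arbitrary:

    >>> import geopandas as gpd
    >>> import shapely
    >>> extent = gpd.GeoSeries([shapely.geometry.box(0, 0, 10000, 10000)])
    >>> a, cols, rows = hexgrid_init(extent, separation=340)
    >>> round(a, 3)
    0.866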
215 """
216 xmin_, ymin_, xmax_, ymax_ = dat_extent.total_bounds
217 a = np.sin(np.pi / 3)
218 cols = np.arange(np.floor(xmin_), np.ceil(xmax_), 3 * separation)
219 rows = np.arange(np.floor(ymin_) / a, np.ceil(ymax_) / a, separation)
220 return a, cols, rows
221
222
def generate_caverns_hexagonal_grid(
    dat_extent,
    zones_df,
    diameter=CAVERN_DIAMETER,
    separation=CAVERN_SEPARATION,
):
    """Generate caverns in a regular hexagonal grid.

    Parameters
    ----------
    dat_extent : geopandas.GeoSeries
        Extent of the data
    zones_df : geopandas.GeoDataFrame
        Zones of interest
    diameter : float
        Diameter of the cavern [m]
    separation : float
        Cavern separation distance [m]

    Returns
    -------
    geopandas.GeoDataFrame
        A polygon geodataframe of potential caverns

    Notes
    -----
    The close-packed hexagonal grid configuration was proposed by
    [#Williams22]_; this configuration provides around 15% more caverns
    compared to a square grid configuration. Hexagonal gridding method based
    on [#Chan19]_.
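
    Examples
    --------
    A sketch of the expected call, assuming ``extent`` is a geoseries of the
    data extent and ``zdf`` is the zones-of-interest geodataframe:

    >>> caverns = generate_caverns_hexagonal_grid(
    ...     extent, zdf
    ... )  # doctest: +SKIP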
253 """
254 a, cols, rows = hexgrid_init(dat_extent=dat_extent, separation=separation)
255
256 cavern_df = []
257 for x in cols:
258 for i, y in enumerate(rows):
259 if i % 2 == 0:
260 x0 = x
261 else:
262 x0 = x + 1.5 * separation
263
264 # hexagon vertices
265 cavern_df.append(shapely.geometry.Point(x0, y * a))
266 cavern_df.append(shapely.geometry.Point(x0 + separation, y * a))
267 cavern_df.append(
268 shapely.geometry.Point(
269 x0 + (1.5 * separation), (y + separation) * a
270 )
271 )
272 cavern_df.append(
273 shapely.geometry.Point(
274 x0 + separation, (y + (2 * separation)) * a
275 )
276 )
277 cavern_df.append(
278 shapely.geometry.Point(x0, (y + (2 * separation)) * a)
279 )
280 cavern_df.append(
281 shapely.geometry.Point(
282 x0 - (0.5 * separation), (y + separation) * a
283 )
284 )
285 # hexagon centroid
286 cavern_df.append(
287 shapely.geometry.Point(
288 x0 + (0.5 * separation), (y + separation) * a
289 )
290 )
291
292 # generate caverns using hexagon vertices and centroids
293 cavern_df = gpd.GeoDataFrame(
294 geometry=gpd.GeoDataFrame(geometry=cavern_df, crs=rd.CRS)
295 .drop_duplicates()
296 .buffer(diameter / 2)
297 )
298
299 # clip caverns to the zones of interest
300 cavern_df = gpd.sjoin(cavern_df, zones_df, predicate="within")
301 cavern_df.drop(columns=["index_right"], inplace=True)
302
303 return cavern_df
304
305
def cavern_dataframe(
    dat_zone,
    cavern_df,
    depths,
    roof_thickness=ROOF_THICKNESS,
):
    """Merge halite data for each cavern location and create a dataframe.

    Parameters
    ----------
    dat_zone : xarray.Dataset
        Xarray dataset for the zone of interest
    cavern_df : geopandas.GeoDataFrame
        Geodataframe of caverns within the zone of interest
    depths : dict[str, float]
        Dictionary of cavern top depth ranges [m] for labelling:
        ``"min"``: minimum depth;
        ``"min_opt"``: minimum optimal depth;
        ``"max_opt"``: maximum optimal depth;
        ``"max"``: maximum depth
    roof_thickness : float
        Salt roof thickness [m]

    Returns
    -------
    geopandas.GeoDataFrame
        The cavern geodataframe with halite height and depth data for only
        the thickest halite layer at each given point
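
    Examples
    --------
    A sketch of the expected call; the depth values below are illustrative
    only, and ``zds`` and ``caverns`` are assumed to come from
    ``zones_of_interest`` and one of the gridding functions:

    >>> depths = {
    ...     "min": 500, "min_opt": 1000, "max_opt": 1500, "max": 2000
    ... }
    >>> caverns = cavern_dataframe(zds, caverns, depths)  # doctest: +SKIP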
334 """
335 # read the halite dataset into a dataframe
336 zdf = (
337 dat_zone.to_dataframe()[list(dat_zone.data_vars)]
338 .dropna()
339 .reset_index()
340 )
341
342 # generate a square grid for the dataset
343 zdf = gpd.GeoDataFrame(
344 zdf,
345 geometry=gpd.GeoSeries(gpd.points_from_xy(zdf.x, zdf.y))
346 .buffer(100)
347 .envelope,
348 crs=rd.CRS,
349 )
350
351 # merge with the cavern data
352 cavern_df = gpd.sjoin(cavern_df, zdf)
353 cavern_df.drop(columns=["index_right"], inplace=True)
354
355 # remove duplicate caverns at each location - keep the layer at the optimal
356 # depth, followed by the thicker layer
357 conditions = [
358 (cavern_df["TopDepthSeabed"] < (depths["min_opt"] - roof_thickness)),
359 (cavern_df["TopDepthSeabed"] >= (depths["min_opt"] - roof_thickness))
360 & (
361 cavern_df["TopDepthSeabed"] <= (depths["max_opt"] - roof_thickness)
362 ),
363 (cavern_df["TopDepthSeabed"] > (depths["max_opt"] - roof_thickness)),
364 ]
365 choices = [1, 2, 1]
366 cavern_df["depth_criteria"] = np.select(conditions, choices)
367 cavern_df = cavern_df.sort_values(
368 ["depth_criteria", "ThicknessNet"], ascending=False
369 ).drop_duplicates(["geometry"])
370 cavern_df = cavern_df.drop(columns=["depth_criteria"])
371
372 return cavern_df
373
374
def label_caverns(
    cavern_df,
    depths,
    heights=None,
    roof_thickness=ROOF_THICKNESS,
    floor_thickness=FLOOR_THICKNESS,
):
    """Label the cavern dataframe by height and depth.

    Parameters
    ----------
    cavern_df : geopandas.GeoDataFrame
        Dataframe of potential caverns
    depths : dict[str, float]
        Dictionary of cavern top depth ranges [m] for labelling:
        ``"min"``: minimum depth;
        ``"min_opt"``: minimum optimal depth;
        ``"max_opt"``: maximum optimal depth;
        ``"max"``: maximum depth
    heights : list[float] or None
        List of fixed cavern heights [m] for labelling; if ``None``, the
        actual height is used
    roof_thickness : float
        Salt roof thickness [m]
    floor_thickness : float
        Minimum salt floor thickness [m]

    Returns
    -------
    geopandas.GeoDataFrame
        A dataframe of potential caverns labelled by cavern height and top
        depth ranges
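
    Examples
    --------
    A sketch of the expected call; the height and depth values below are
    illustrative only:

    >>> depths = {
    ...     "min": 500, "min_opt": 1000, "max_opt": 1500, "max": 2000
    ... }
    >>> caverns = label_caverns(
    ...     caverns, depths, heights=[120, 240]
    ... )  # doctest: +SKIP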
407 """
408 # label caverns by height
409 if heights:
410 heights = sorted(heights)
411 if len(heights) == 1:
412 cavern_df["cavern_height"] = heights[0]
413 else:
414 conditions = []
415 for n, _ in enumerate(heights):
416 if n == 0:
417 conditions.append(
418 (
419 cavern_df["ThicknessNet"]
420 < (heights[1] + roof_thickness + floor_thickness)
421 )
422 )
423 elif n == len(heights) - 1:
424 conditions.append(
425 (
426 cavern_df["ThicknessNet"]
427 >= (heights[n] + roof_thickness + floor_thickness)
428 )
429 )
430 else:
431 conditions.append(
432 (
433 cavern_df["ThicknessNet"]
434 >= (heights[n] + roof_thickness + floor_thickness)
435 )
436 & (
437 cavern_df["ThicknessNet"]
438 < (
439 heights[n + 1]
440 + roof_thickness
441 + floor_thickness
442 )
443 )
444 )
445 choices = [str(x) for x in heights]
446 cavern_df["cavern_height"] = np.select(conditions, choices)
447 cavern_df["cavern_height"] = cavern_df["cavern_height"].astype(float)
448 else:
449 cavern_df["cavern_height"] = (
450 cavern_df["ThicknessNet"] - roof_thickness - floor_thickness
451 )
452
453 # label caverns by depth
454 conditions = [
455 (cavern_df["TopDepthSeabed"] < (depths["min_opt"] - roof_thickness)),
456 (cavern_df["TopDepthSeabed"] >= (depths["min_opt"] - roof_thickness))
457 & (
458 cavern_df["TopDepthSeabed"] <= (depths["max_opt"] - roof_thickness)
459 ),
460 (cavern_df["TopDepthSeabed"] > (depths["max_opt"] - roof_thickness)),
461 ]
462 choices = [
463 f"{depths['min']:,} - {depths['min_opt']:,}",
464 f"{depths['min_opt']:,} - {depths['max_opt']:,}",
465 f"{depths['max_opt']:,} - {depths['max']:,}",
466 ]
467 cavern_df["depth"] = np.select(conditions, choices)
468
469 cavern_df["cavern_depth"] = cavern_df["TopDepthSeabed"] + roof_thickness
470
471 return cavern_df
472
473
def constraint_halite_edge(dat_xr, buffer=PILLAR_WIDTH):
    """Generate a constraint from the edge of each halite member.

    Parameters
    ----------
    dat_xr : xarray.Dataset
        Xarray dataset of the halite data
    buffer : float
        Buffer [m]

    Returns
    -------
    dict[str, geopandas.GeoDataFrame]
        Dictionary of GeoPandas geodataframes of the halite edge constraint
        for each halite member

    Notes
    -----
    The buffer is set to three times the cavern diameter, i.e. the pillar
    width.
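
    Examples
    --------
    A sketch of the expected call, assuming ``ds`` is the halite dataset
    with a ``halite`` dimension:

    >>> edges = constraint_halite_edge(ds)  # doctest: +SKIP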
493 """
494 buffer_edge = {}
495 for halite in dat_xr.halite.values:
496 buffer_edge[halite] = rd.halite_shape(dat_xr=dat_xr, halite=halite)
497 buffer_edge[halite] = gpd.GeoDataFrame(
498 geometry=buffer_edge[halite].boundary.buffer(buffer)
499 ).overlay(buffer_edge[halite], how="intersection")
500 return buffer_edge
501
502
def constraint_exploration_well(data_path, buffer=500):
    """Read exploration well data and generate the constraint.

    Parameters
    ----------
    data_path : str
        Path to the Zip file
    buffer : float
        Buffer [m]

    Returns
    -------
    tuple[geopandas.GeoDataFrame, geopandas.GeoDataFrame]
        Geodataframes of the dataset and buffer

    Notes
    -----
    A 500 m buffer is suggested in the draft OREDP II, p. 108 [#DECC23]_.
    """
    wells = rd.read_shapefile_from_zip(data_path=data_path)
    wells = wells[wells["AREA"].str.contains("Kish")].to_crs(rd.CRS)
    wells_b = gpd.GeoDataFrame(geometry=wells.buffer(buffer))
    return wells, wells_b


def constraint_wind_farm(data_path):
    """Read data for wind farms.

    Parameters
    ----------
    data_path : str
        Path to the Zip file

    Returns
    -------
    geopandas.GeoDataFrame
        Geodataframe of the dataset

    Notes
    -----
    The shapes are used as-is without a buffer, as suggested for renewable
    energy test site areas in the draft OREDP II, p. 109 [#DECC23]_.
    """
    wind_farms = rd.read_shapefile_from_zip(data_path=data_path)
    wind_farms = wind_farms.to_crs(rd.CRS)
    # keep only wind farms near the Kish Basin
    wind_farms.drop(index=[0, 1, 7], inplace=True)
    # merge the Dublin Array wind farm polygons
    wind_farms.at[2, "name"] = "Dublin Array"
    wind_farms.at[3, "name"] = "Dublin Array"
    wind_farms = wind_farms.dissolve(by="name")
    wind_farms.reset_index(inplace=True)
    # assign capacities
    wind_farms.sort_values(by=["name"], inplace=True)
    wind_farms["cap"] = [1300, 824, 500]
    return wind_farms


def constraint_shipping_routes(data_path, dat_extent, buffer=1852):
    """Read frequent shipping route data and generate the constraint.

    Parameters
    ----------
    data_path : str
        Path to the Zip file
    dat_extent : geopandas.GeoSeries
        Extent of the data
    buffer : float
        Buffer [m]

    Returns
    -------
    tuple[geopandas.GeoDataFrame, geopandas.GeoDataFrame]
        Geodataframes of the dataset and buffer

    Notes
    -----
    A 1 NM (nautical mile) buffer is suggested in the draft OREDP II, p. 108
    [#DECC23]_. 1 NM is equivalent to 1,852 m [#NIST16]_.
    """
    shipping = rd.read_shapefile_from_zip(data_path=data_path)
    # keep only features near the Kish Basin
    shipping = (
        shipping.to_crs(rd.CRS)
        .sjoin(gpd.GeoDataFrame(geometry=dat_extent.buffer(3000)))
        .reset_index()
    )
    # crop to the extent
    shipping = shipping.overlay(
        gpd.GeoDataFrame(geometry=dat_extent.buffer(3000))
    )
    shipping.drop(columns=["index_right"], inplace=True)
    shipping_b = gpd.GeoDataFrame(geometry=shipping.buffer(buffer)).dissolve()
    return shipping, shipping_b


def constraint_shipwrecks(data_path, dat_extent, buffer=100):
    """Read shipwreck data and generate the constraint.

    Parameters
    ----------
    data_path : str
        Path to the Zip file
    dat_extent : geopandas.GeoSeries
        Extent of the data
    buffer : float
        Buffer [m]

    Returns
    -------
    tuple[geopandas.GeoDataFrame, geopandas.GeoDataFrame]
        Geodataframes of the dataset and buffer

    Notes
    -----
    A 100 m buffer is recommended for Archaeological Exclusion Zones
    [#RE21]_.
    """
    shipwrecks = rd.read_shapefile_from_zip(data_path=data_path)
    # keep only features near the Kish Basin
    shipwrecks = (
        shipwrecks.to_crs(rd.CRS)
        .sjoin(gpd.GeoDataFrame(geometry=dat_extent.buffer(3000)))
        .reset_index()
    )
    shipwrecks.drop(columns=["index_right"], inplace=True)
    shipwrecks_b = gpd.GeoDataFrame(geometry=shipwrecks.buffer(buffer))
    return shipwrecks, shipwrecks_b


def constraint_subsea_cables(data_path, dat_extent, buffer=750):
    """Read subsea cable data and generate the constraint.

    Parameters
    ----------
    data_path : str
        Path to the GPKG file
    dat_extent : geopandas.GeoSeries
        Extent of the data
    buffer : float
        Buffer [m]

    Returns
    -------
    tuple[geopandas.GeoDataFrame, geopandas.GeoDataFrame]
        Geodataframes of the dataset and buffer

    Notes
    -----
    A 750 m buffer is suggested in the draft OREDP II, pp. 109-111
    [#DECC23]_.
    """
    cables = gpd.read_file(data_path)
    cables = cables.to_crs(rd.CRS)
    # remove point features
    cables = cables.drop(range(3)).reset_index(drop=True)
    # crop to the extent
    cables = cables.overlay(
        gpd.GeoDataFrame(geometry=dat_extent.buffer(3000))
    )
    cables_b = gpd.GeoDataFrame(geometry=cables.buffer(buffer)).dissolve()
    return cables, cables_b


def exclude_constraint(cavern_df, cavern_all, exclusions, key):
    """Exclude a constraint based on its dictionary key.

    Parameters
    ----------
    cavern_df : geopandas.GeoDataFrame
        Dataframe of available caverns
    cavern_all : geopandas.GeoDataFrame
        Dataframe of all caverns, i.e. available and excluded
    exclusions : dict[str, geopandas.GeoDataFrame]
        Dictionary of exclusions data
    key : str
        Key for the constraint in the dictionary; one of the following:
        ``"shipping"``: frequent shipping routes;
        ``"cables"``: subsea cables;
        ``"wind_farms"``: offshore wind farms;
        ``"wells"``: exploration wells;
        ``"shipwrecks"``: shipwrecks

    Returns
    -------
    geopandas.GeoDataFrame
        Dataframe of available caverns
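
    Examples
    --------
    A sketch of the expected call, assuming ``caverns`` and ``exclusions``
    have been prepared beforehand; before any exclusions are applied, the
    available and full cavern dataframes coincide:

    >>> available = exclude_constraint(
    ...     caverns, caverns, exclusions, "cables"
    ... )  # doctest: +SKIP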
688 """
689 try:
690 cavern_df = cavern_df.overlay(
691 gpd.sjoin(cavern_df, exclusions[key], predicate="intersects"),
692 how="difference",
693 )
694 print(f"Number of potential caverns: {len(cavern_df):,}")
695 pct = (len(cavern_all) - len(cavern_df)) / len(cavern_all) * 100
696 print(f"Caverns excluded: {pct:.2f}%")
697 except KeyError:
698 print("No data specified!")
699 print("-" * 60)
700 return cavern_df
701
702
def generate_caverns_with_constraints(cavern_df, exclusions):
    """Add constraints to the cavern configuration.

    Parameters
    ----------
    cavern_df : geopandas.GeoDataFrame
        Dataframe of available caverns
    exclusions : dict[str, geopandas.GeoDataFrame]
        Dictionary of exclusions data; ``"edge"`` must be present in the
        dictionary, but if any of the other keys listed below are missing,
        the corresponding exclusion will be skipped:
        ``"edge"``: halite edge, dict[str, geopandas.GeoDataFrame];
        ``"shipping"``: frequent shipping routes;
        ``"cables"``: subsea cables;
        ``"wind_farms"``: offshore wind farms;
        ``"wells"``: exploration wells;
        ``"shipwrecks"``: shipwrecks

    Returns
    -------
    tuple[geopandas.GeoDataFrame, geopandas.GeoDataFrame]
        Dataframes of the available and excluded caverns
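
    Examples
    --------
    A sketch of the expected call, assuming the inputs have been prepared
    with the constraint functions above; every key other than ``"edge"`` is
    optional:

    >>> exclusions = {
    ...     "edge": edges,
    ...     "shipping": shipping_b,
    ...     "cables": cables_b,
    ... }
    >>> caverns, excluded = generate_caverns_with_constraints(
    ...     caverns, exclusions
    ... )  # doctest: +SKIP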
725 """
726 print("Without constraints...")
727 print(f"Number of potential caverns: {len(cavern_df):,}")
728 print("-" * 60)
729
730 print("Excluding salt formation edges...")
731 cavern_dict = {}
732 for h in cavern_df["halite"].unique():
733 cavern_dict[h] = cavern_df[cavern_df["halite"] == h]
734 cavern_dict[h] = cavern_dict[h].overlay(
735 gpd.sjoin(
736 cavern_dict[h],
737 exclusions["edge"][h],
738 predicate="intersects",
739 ),
740 how="difference",
741 )
742 cavern_df = pd.concat(cavern_dict.values())
743 print(f"Number of potential caverns: {len(cavern_df):,}")
744 print("-" * 60)
745
746 # keep original
747 cavern_all = cavern_df.copy()
748
749 for c in ["shipping", "wind_farms", "cables", "wells", "shipwrecks"]:
750 print(f"Exclude {c.replace('_', ' ')}...")
751 cavern_df = exclude_constraint(
752 cavern_df=cavern_df,
753 cavern_all=cavern_all,
754 exclusions=exclusions,
755 key=c,
756 )
757
758 # get excluded caverns
759 caverns_excl = cavern_all.overlay(
760 gpd.sjoin(cavern_all, cavern_df, predicate="intersects"),
761 how="difference",
762 )
763
764 return cavern_df, caverns_excl
765
766
def read_weibull_data(data_path_weibull, data_path_wind_farms):
    """Extract the mean, max, and min Weibull parameters of wind speeds.

    Parameters
    ----------
    data_path_weibull : str
        Path to the Weibull parameter data Zip file
    data_path_wind_farms : str
        Path to the wind farm data Zip file

    Returns
    -------
    pandas.DataFrame
        Dataframe of k and C values for each wind farm

    Notes
    -----
    Data is extracted for each wind farm in the area of interest, i.e. the
    Kish Basin: Codling, Dublin Array, and North Irish Sea Array.
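
    Examples
    --------
    A sketch of the expected call; the file paths below are hypothetical:

    >>> weibull = read_weibull_data(
    ...     "data/weibull.zip", "data/wind_farms.zip"
    ... )  # doctest: +SKIP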
786 """
787 weibull_df = {}
788
789 for w in ["c", "k"]:
790 # read Weibull parameter data
791 weibull_df[w] = rd.read_shapefile_from_zip(
792 data_path=data_path_weibull, endswith=f"{w}_ITM.shp"
793 )
794
795 # read wind farm data
796 wind_farms = constraint_wind_farm(data_path=data_path_wind_farms)
797
798 # convert CRS and keep areas intersecting with wind farms
799 weibull_df[w] = (
800 weibull_df[w]
801 .to_crs(rd.CRS)
802 .overlay(wind_farms, how="intersection")
803 )
804
805 # rename column
806 weibull_df[w].rename(columns={"Value": w}, inplace=True)
807
808 # average c and k over wind farms
809 weibull_df[w] = wind_farms.merge(
810 weibull_df[w].dissolve(
811 by="name", aggfunc={w: ["min", "max", "mean"]}
812 ),
813 on="name",
814 )
815
816 # keep only relevant columns
817 weibull_df[w] = weibull_df[w][
818 ["name", "cap", (w, "min"), (w, "max"), (w, "mean")]
819 ]
820
821 # reset index
822 weibull_df[w] = weibull_df[w].reset_index(drop=True)
823
824 # merge
825 weibull_df = pd.merge(weibull_df["c"], weibull_df["k"], on=["name", "cap"])
826
827 return weibull_df