#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
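+# Bucket names can be overridden via the environment; the defaults are the FD.io
+# CloudFront-indexed log and docs buckets.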
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-vpp-device-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-coverage-2402" in path]
+filtered_paths = [path for path in paths if "report-coverage-2406" in path]
out_sdf = process_json_to_dataframe("device", filtered_paths)
out_sdf.printSchema()
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
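+# Build the output boto3 session from explicit OUT_AWS_* credentials when they are
+# set; otherwise fall back to the default credential chain (env vars, config files,
+# instance role).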
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-coverage-2402" in path]
+filtered_paths = [path for path in paths if "report-coverage-2406" in path]
out_sdf = process_json_to_dataframe("hoststack", filtered_paths)
-out_sdf.show(truncate=False)
out_sdf.printSchema()
out_sdf = out_sdf \
.withColumn("year", lit(datetime.now().year)) \
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-coverage-2402" in path]
+filtered_paths = [path for path in paths if "report-coverage-2406" in path]
out_sdf = process_json_to_dataframe("mrr", filtered_paths)
out_sdf.printSchema()
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-coverage-2402" in path]
+filtered_paths = [path for path in paths if "report-coverage-2406" in path]
out_sdf = process_json_to_dataframe("ndrpdr", filtered_paths)
out_sdf.printSchema()
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-coverage-2402" in path]
+filtered_paths = [path for path in paths if "report-coverage-2406" in path]
out_sdf = process_json_to_dataframe("reconf", filtered_paths)
-out_sdf.show(truncate=False)
out_sdf.printSchema()
out_sdf = out_sdf \
.withColumn("year", lit(datetime.now().year)) \
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-coverage-2402" in path]
+filtered_paths = [path for path in paths if "report-coverage-2406" in path]
out_sdf = process_json_to_dataframe("soak", filtered_paths)
out_sdf.printSchema()
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/coverage_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-iterative-2402" in path]
+filtered_paths = [path for path in paths if "report-iterative-2406" in path]
out_sdf = process_json_to_dataframe("hoststack", filtered_paths)
-out_sdf.show(truncate=False)
out_sdf.printSchema()
out_sdf = out_sdf \
.withColumn("year", lit(datetime.now().year)) \
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-iterative-2402" in path]
+filtered_paths = [path for path in paths if "report-iterative-2406" in path]
out_sdf = process_json_to_dataframe("mrr", filtered_paths)
out_sdf.printSchema()
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-iterative-2402" in path]
+filtered_paths = [path for path in paths if "report-iterative-2406" in path]
out_sdf = process_json_to_dataframe("ndrpdr", filtered_paths)
out_sdf.printSchema()
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-iterative-2402" in path]
+filtered_paths = [path for path in paths if "report-iterative-2406" in path]
out_sdf = process_json_to_dataframe("reconf", filtered_paths)
out_sdf.show(truncate=False)
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
ignore_empty=True
)
-filtered_paths = [path for path in paths if "report-iterative-2402" in path]
+filtered_paths = [path for path in paths if "report-iterative-2406" in path]
out_sdf = process_json_to_dataframe("soak", filtered_paths)
out_sdf.printSchema()
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
- path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2402",
+ path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2406",
dataset=True,
partition_cols=["test_type", "year", "month", "day"],
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
from pyspark.sql.functions import lit
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="suite.info.json.gz"
IGNORE_SUFFIX=[]
for schema_name in ["sra"]:
out_sdf = process_json_to_dataframe(schema_name, paths)
- out_sdf.show(truncate=False)
out_sdf.printSchema()
out_sdf = out_sdf \
.withColumn("year", lit(datetime.now().year)) \
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+ try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+ except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
filtered_paths = [path for path in paths if "daily" in path or "weekly" in path]
out_sdf = process_json_to_dataframe("hoststack", filtered_paths)
-out_sdf.show(truncate=False)
out_sdf.printSchema()
out_sdf = out_sdf \
.withColumn("year", lit(datetime.now().year)) \
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
#!/usr/bin/env python3
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from pyspark.sql.types import StructType
-S3_LOGS_BUCKET="fdio-logs-s3-cloudfront-index"
-S3_DOCS_BUCKET="fdio-docs-s3-cloudfront-index"
+S3_LOGS_BUCKET=environ.get("S3_LOGS_BUCKET", "fdio-logs-s3-cloudfront-index")
+S3_DOCS_BUCKET=environ.get("S3_DOCS_BUCKET", "fdio-docs-s3-cloudfront-index")
PATH=f"s3://{S3_LOGS_BUCKET}/vex-yul-rot-jenkins-1/csit-*-perf-*"
SUFFIX="info.json.gz"
IGNORE_SUFFIX=[
filtered_paths = [path for path in paths if "daily" in path or "weekly" in path]
out_sdf = process_json_to_dataframe("soak", filtered_paths)
-out_sdf.show(truncate=False)
out_sdf.printSchema()
out_sdf = out_sdf \
.withColumn("year", lit(datetime.now().year)) \
.withColumn("day", lit(datetime.now().day)) \
.repartition(1)
+try:
+ boto3_session = session.Session(
+ aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
+ aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
+ region_name=environ["OUT_AWS_DEFAULT_REGION"]
+ )
+except KeyError:
+ boto3_session = session.Session()
+
try:
wr.s3.to_parquet(
df=out_sdf.toPandas(),
compression="snappy",
use_threads=True,
mode="overwrite_partitions",
- boto3_session=session.Session(
- aws_access_key_id=environ["OUT_AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=environ["OUT_AWS_SECRET_ACCESS_KEY"],
- region_name=environ["OUT_AWS_DEFAULT_REGION"]
- )
+ boto3_session=boto3_session
)
except EmptyDataFrame:
pass
+++ /dev/null
-job "${job_name}" {
- datacenters = "${datacenters}"
- type = "${type}"
- periodic {
- cron = "${cron}"
- prohibit_overlap = "${prohibit_overlap}"
- time_zone = "${time_zone}"
- }
- group "${job_name}" {
- restart {
- mode = "fail"
- }
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
- task "${job_name}" {
- artifact {
- source = "git::https://github.com/FDio/csit"
- destination = "local/csit"
- }
- driver = "docker"
- config {
- image = "${image}"
- command = "gluesparksubmit"
- args = [
- "--driver-memory", "20g",
- "--executor-memory", "20g",
- "--executor-cores", "2",
- "--master", "local[2]",
- "coverage_device_rls2402.py"
- ]
- work_dir = "/local/csit/csit.infra.etl"
- }
- env {
- AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
- AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
- AWS_DEFAULT_REGION = "${aws_default_region}"
- OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
- OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
- OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
- ${ envs }
- }
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
\ No newline at end of file
"--executor-memory", "20g",
"--executor-cores", "2",
"--master", "local[2]",
- "iterative_ndrpdr_rls2402.py"
+ "coverage_device_rls2406.py"
]
work_dir = "/local/csit/csit.infra.etl"
}
+++ /dev/null
-job "${job_name}" {
- datacenters = "${datacenters}"
- type = "${type}"
- periodic {
- cron = "${cron}"
- prohibit_overlap = "${prohibit_overlap}"
- time_zone = "${time_zone}"
- }
- group "${job_name}" {
- restart {
- mode = "fail"
- }
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
- task "${job_name}" {
- artifact {
- source = "git::https://github.com/FDio/csit"
- destination = "local/csit"
- }
- driver = "docker"
- config {
- image = "${image}"
- command = "gluesparksubmit"
- args = [
- "--driver-memory", "20g",
- "--executor-memory", "20g",
- "--executor-cores", "2",
- "--master", "local[2]",
- "coverage_hoststack_rls2402.py"
- ]
- work_dir = "/local/csit/csit.infra.etl"
- }
- env {
- AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
- AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
- AWS_DEFAULT_REGION = "${aws_default_region}"
- OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
- OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
- OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
- ${ envs }
- }
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
\ No newline at end of file
--- /dev/null
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
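+    # Schedule only on non-arm64 nodes of class "builder".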
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
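+      # Run Spark in local mode inside the container: 2 cores, 20g for driver and executors.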
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_hoststack_rls2406.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
+++ /dev/null
-job "${job_name}" {
- datacenters = "${datacenters}"
- type = "${type}"
- periodic {
- cron = "${cron}"
- prohibit_overlap = "${prohibit_overlap}"
- time_zone = "${time_zone}"
- }
- group "${job_name}" {
- restart {
- mode = "fail"
- }
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
- task "${job_name}" {
- artifact {
- source = "git::https://github.com/FDio/csit"
- destination = "local/csit"
- }
- driver = "docker"
- config {
- image = "${image}"
- command = "gluesparksubmit"
- args = [
- "--driver-memory", "20g",
- "--executor-memory", "20g",
- "--executor-cores", "2",
- "--master", "local[2]",
- "coverage_mrr_rls2402.py"
- ]
- work_dir = "/local/csit/csit.infra.etl"
- }
- env {
- AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
- AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
- AWS_DEFAULT_REGION = "${aws_default_region}"
- OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
- OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
- OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
- ${ envs }
- }
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
\ No newline at end of file
"--executor-memory", "20g",
"--executor-cores", "2",
"--master", "local[2]",
- "iterative_mrr_rls2402.py"
+ "coverage_mrr_rls2406.py"
]
work_dir = "/local/csit/csit.infra.etl"
}
+++ /dev/null
-job "${job_name}" {
- datacenters = "${datacenters}"
- type = "${type}"
- periodic {
- cron = "${cron}"
- prohibit_overlap = "${prohibit_overlap}"
- time_zone = "${time_zone}"
- }
- group "${job_name}" {
- restart {
- mode = "fail"
- }
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
- task "${job_name}" {
- artifact {
- source = "git::https://github.com/FDio/csit"
- destination = "local/csit"
- }
- driver = "docker"
- config {
- image = "${image}"
- command = "gluesparksubmit"
- args = [
- "--driver-memory", "20g",
- "--executor-memory", "20g",
- "--executor-cores", "2",
- "--master", "local[2]",
- "coverage_ndrpdr_rls2402.py"
- ]
- work_dir = "/local/csit/csit.infra.etl"
- }
- env {
- AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
- AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
- AWS_DEFAULT_REGION = "${aws_default_region}"
- OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
- OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
- OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
- ${ envs }
- }
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
\ No newline at end of file
"--executor-memory", "20g",
"--executor-cores", "2",
"--master", "local[2]",
- "iterative_reconf_rls2402.py"
+ "coverage_ndrpdr_rls2406.py"
]
work_dir = "/local/csit/csit.infra.etl"
}
+++ /dev/null
-job "${job_name}" {
- datacenters = "${datacenters}"
- type = "${type}"
- periodic {
- cron = "${cron}"
- prohibit_overlap = "${prohibit_overlap}"
- time_zone = "${time_zone}"
- }
- group "${job_name}" {
- restart {
- mode = "fail"
- }
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
- task "${job_name}" {
- artifact {
- source = "git::https://github.com/FDio/csit"
- destination = "local/csit"
- }
- driver = "docker"
- config {
- image = "${image}"
- command = "gluesparksubmit"
- args = [
- "--driver-memory", "20g",
- "--executor-memory", "20g",
- "--executor-cores", "2",
- "--master", "local[2]",
- "coverage_reconf_rls2402.py"
- ]
- work_dir = "/local/csit/csit.infra.etl"
- }
- env {
- AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
- AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
- AWS_DEFAULT_REGION = "${aws_default_region}"
- OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
- OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
- OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
- ${ envs }
- }
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
\ No newline at end of file
--- /dev/null
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_reconf_rls2406.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
"--executor-memory", "20g",
"--executor-cores", "2",
"--master", "local[2]",
- "iterative_soak_rls2402.py"
+ "coverage_soak_rls2406.py"
]
work_dir = "/local/csit/csit.infra.etl"
}
"--executor-memory", "20g",
"--executor-cores", "2",
"--master", "local[2]",
- "iterative_hoststack_rls2402.py"
+ "iterative_hoststack_rls2406.py"
]
work_dir = "/local/csit/csit.infra.etl"
}
"--executor-memory", "20g",
"--executor-cores", "2",
"--master", "local[2]",
- "coverage_soak_rls2402.py"
+ "iterative_mrr_rls2406.py"
]
work_dir = "/local/csit/csit.infra.etl"
}
}
}
}
-}
\ No newline at end of file
+}
--- /dev/null
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_ndrpdr_rls2406.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
--- /dev/null
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_reconf_rls2406.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
--- /dev/null
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_soak_rls2406.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
memory = 60000
}
-#module "etl-iterative-hoststack-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-iterative-hoststack-rls2402"
-#}
-#
-#module "etl-iterative-mrr-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-iterative-mrr-rls2402"
-#}
-#
-#module "etl-iterative-ndrpdr-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-iterative-ndrpdr-rls2402"
-#}
-#
-#module "etl-iterative-reconf-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-iterative-reconf-rls2402"
-#}
-#
-#module "etl-iterative-soak-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-iterative-soak-rls2402"
-#}
-#
-#module "etl-coverage-device-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-coverage-device-rls2402"
-#}
-#
-#module "etl-coverage-hoststack-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-coverage-hoststack-rls2402"
-#}
-#
-#module "etl-coverage-mrr-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-coverage-mrr-rls2402"
-#}
-#
-#module "etl-coverage-ndrpdr-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-coverage-ndrpdr-rls2402"
-#}
-#
-#module "etl-coverage-reconf-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-coverage-reconf-rls2402"
-#}
-#
-#module "etl-coverage-soak-rls2402" {
-# providers = {
-# nomad = nomad.yul1
-# }
-# source = "../"
-#
-# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
-# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
-# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
-# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
-# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
-# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
-# cron = "0 30 0 * * * *"
-# datacenters = ["yul1"]
-# job_name = "etl-coverage-soak-rls2402"
-#}
-#
\ No newline at end of file
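+# One module instance per rls2406 ETL job; input credentials come from the fdio_logs
+# Vault secret and output credentials from fdio_docs, all on the same daily cron.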
+module "etl-iterative-hoststack-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-iterative-hoststack-rls2406"
+}
+
+module "etl-iterative-mrr-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-iterative-mrr-rls2406"
+}
+
+module "etl-iterative-ndrpdr-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-iterative-ndrpdr-rls2406"
+}
+
+module "etl-iterative-reconf-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-iterative-reconf-rls2406"
+}
+
+module "etl-iterative-soak-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-iterative-soak-rls2406"
+}
+
+module "etl-coverage-device-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-coverage-device-rls2406"
+}
+
+module "etl-coverage-hoststack-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-coverage-hoststack-rls2406"
+}
+
+module "etl-coverage-mrr-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-coverage-mrr-rls2406"
+}
+
+module "etl-coverage-ndrpdr-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-coverage-ndrpdr-rls2406"
+}
+
+module "etl-coverage-reconf-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-coverage-reconf-rls2406"
+}
+
+module "etl-coverage-soak-rls2406" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-coverage-soak-rls2406"
+}
+