/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.deploy.k8s

import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.ConfigBuilder
import org.apache.spark.network.util.ByteUnit

private[spark] object Config extends Logging {

  val KUBERNETES_NAMESPACE =
    ConfigBuilder("spark.kubernetes.namespace")
      .doc("The namespace that will be used for running the driver and executor pods. When using " +
        "spark-submit in cluster mode, this can also be passed to spark-submit via the " +
        "--kubernetes-namespace command line argument.")
      .stringConf
      .createWithDefault("default")

  val EXECUTOR_DOCKER_IMAGE =
    ConfigBuilder("spark.kubernetes.executor.docker.image")
      .doc("Docker image to use for the executors. Specify this using the standard Docker tag " +
        "format.")
      .stringConf
      .createOptional

  val DOCKER_IMAGE_PULL_POLICY =
    ConfigBuilder("spark.kubernetes.docker.image.pullPolicy")
      .doc("Kubernetes image pull policy. Valid values are Always, Never, and IfNotPresent.")
      .stringConf
      .checkValues(Set("Always", "Never", "IfNotPresent"))
      .createWithDefault("IfNotPresent")

  val APISERVER_AUTH_DRIVER_CONF_PREFIX =
    "spark.kubernetes.authenticate.driver"
  val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
    "spark.kubernetes.authenticate.driver.mounted"
  val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
  val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
  val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
  val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
  val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"
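  // For reference, callers combine a prefix and suffix into a full
  // configuration key (an assumption about usage; the composition is not
  // performed in this file), e.g.
  // s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.$OAUTH_TOKEN_CONF_SUFFIX"
  // resolves to "spark.kubernetes.authenticate.driver.oauthToken".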

  val KUBERNETES_SERVICE_ACCOUNT_NAME =
    ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
      .doc("Service account that is used when running the driver pod. The driver pod uses " +
        "this service account when requesting executor pods from the API server. If specific " +
        "credentials are given for the driver pod to use, the driver will favor " +
        "using those credentials instead.")
      .stringConf
      .createOptional

  // Note that while we set a default for this when we start up the
  // scheduler, the specific default value is dynamically determined
  // based on the executor memory.
  val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
    ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
      .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This " +
        "is memory that accounts for things like VM overheads, interned strings, other native " +
        "overheads, etc. This tends to grow with the executor size (typically 6-10%).")
      .bytesConf(ByteUnit.MiB)
      .createOptional
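  // A minimal sketch of how the scheduler backend could derive the dynamic
  // default mentioned above; the 10% factor and 384 MiB floor are assumptions
  // for illustration, not values defined in this file:
  //
  //   val overheadMiB = sparkConf.get(KUBERNETES_EXECUTOR_MEMORY_OVERHEAD)
  //     .getOrElse(math.max((0.10 * executorMemoryMiB).toLong, 384L))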

  val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
  val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."

  val KUBERNETES_DRIVER_POD_NAME =
    ConfigBuilder("spark.kubernetes.driver.pod.name")
      .doc("Name of the driver pod.")
      .stringConf
      .createOptional

  val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
    ConfigBuilder("spark.kubernetes.executor.podNamePrefix")
      .doc("Prefix to use in front of the executor pod names.")
      .internal()
      .stringConf
      .createWithDefault("spark")
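  // Illustrative naming only (an assumption, not defined in this file):
  // executor pods are typically named by appending an executor ID to this
  // prefix, e.g. "spark-exec-1" with the default prefix.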

  val KUBERNETES_ALLOCATION_BATCH_SIZE =
    ConfigBuilder("spark.kubernetes.allocation.batch.size")
      .doc("Number of pods to launch at once in each round of executor allocation.")
      .intConf
      .checkValue(value => value > 0, "Allocation batch size must be a positive integer")
      .createWithDefault(5)

  val KUBERNETES_ALLOCATION_BATCH_DELAY =
    ConfigBuilder("spark.kubernetes.allocation.batch.delay")
      .doc("Number of seconds to wait between each round of executor allocation.")
      .longConf
      .checkValue(value => value > 0, "Allocation batch delay must be a positive integer")
      .createWithDefault(1)
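  // Illustrative usage (assuming a SparkConf named sparkConf is in scope);
  // reading a ConfigEntry through SparkConf.get applies the defaults above:
  //
  //   val batchSize = sparkConf.get(KUBERNETES_ALLOCATION_BATCH_SIZE)   // 5
  //   val batchDelay = sparkConf.get(KUBERNETES_ALLOCATION_BATCH_DELAY) // 1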

  val KUBERNETES_EXECUTOR_LIMIT_CORES =
    ConfigBuilder("spark.kubernetes.executor.limit.cores")
      .doc("Specify the hard CPU limit for a single executor pod.")
      .stringConf
      .createOptional

  val KUBERNETES_EXECUTOR_LOST_REASON_CHECK_MAX_ATTEMPTS =
    ConfigBuilder("spark.kubernetes.executor.lostCheck.maxAttempts")
      .doc("Maximum number of attempts allowed for checking the reason of an executor loss " +
        "before it is assumed that the executor failed.")
      .intConf
      .checkValue(value => value > 0, "Maximum attempts allowed for checking the executor " +
        "lost reason must be a positive integer")
      .createWithDefault(10)

  val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
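  // Illustrative usage (an assumption about callers, not shown in this file):
  // prefix-style settings can be gathered with SparkConf.getAllWithPrefix, e.g.
  //
  //   val nodeSelector =
  //     sparkConf.getAllWithPrefix(KUBERNETES_NODE_SELECTOR_PREFIX).toMap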
}