diff --git a/ui/i18n-config.json b/ui/i18n-config.json index aaf0daa..1594597 100644 --- a/ui/i18n-config.json +++ b/ui/i18n-config.json @@ -1,8 +1,9 @@ { "zh-cn": { "languageSwitching": "语言切换", - "trainingJob": "模型训练", + "trainingJob": "分布式建模", "Notebook": "模型开发", + "Notebook1": "模型开发", "management": "系统管理", "basisPlatform": "基础平台管理", "userManager": "用户信息管理", @@ -26,6 +27,9 @@ "home": "工作概览", "title": "欢迎体验 WeDataSphere 机器学习平台", "introduction": "Prophecis 提供多种模型训练调试方式,集成多种开源机器学习框架,具备机器学习计算集群的多租户管理能力,提供生产环境全栈化容器部署与管理服务。", + "jobList": "实验列表", + "totalJob": "总任务数", + "runjob": "运行中的任务数", "cardNumber": "占用卡数", "totalInstance": "总实例数", "runInstance": "运行中的实例数", @@ -62,48 +66,208 @@ "lastStep": "上一步", "nextStep": "下一步", "confirmClose": "确认关闭", - "paste": "粘贴" + "paste": "粘贴", + "export": "导出" }, "DI": { - "name": "名称", + "distributedModeling": "建模实验", + "createGPU": "创建GPU集群任务", + "createHadoop": "创建Hadoop集群任务", + "modelId": "任务ID", + "jobName": "任务名称", + "cpu": "计算任务CPU", + "gpu": "计算任务GPU", "image": "镜像", + "dataStorage": "数据存储", + "resultStorage": "结果存储", "status": "状态", "createTime": "创建时间", + "createTraining": "创建训练任务", "basicSettings": "基础信息设置", + "trainingjobName": "训练任务名称", + "description": "描述", + "computingResource": "计算资源设置", + "memory": "计算任务内存", "imageSettings": "镜像设置", "imageType": "镜像类型", "standard": "标准", "custom": "自定义", "imageSelection": "镜像选择", - "imagePro": "请选择镜像列", - "imageInputPro": "请输入镜像", - "computingResource": "计算资源设置", - "YARNQueue": "队列配置", - "queuePro": "请输入队列配置(例如:root.default)", - "driverMemory": "Driver Memory", - "driverMemoryPro": "设置Driver所需的内存资源", - "executorInstances": "Executor Cores", - "executorInstancesPro": "设置各个Executor并发线程数目(必须为整数)", - "executorMemory": "Executor Memory", - "executorMemoryPro": "设置Executor所需的内存资源", - "linkisInstance": "Executors", - "linkisInstancePro": "设置任务所需的Executor数量(必须为整数)", + "trainingDirectory": "训练任务目录设置", "localPath": "宿主机目录", + "trainingData": "训练数据子目录", + "trainingResult": "训练结果子目录", + "jobExecution": "任务执行设置", + "entrance": "执行入口", + "bucket": "目录类型", + "name": "名称", + "path": "路径", + "type": "类型", + "trainingjobNamePro": "请输入训练任务名称", + "jobDescriptionPro": "请输入任务描述", + "CPUPro": "设置Learner需要的CPU资源(例如:1 或 2.1)", + "GPUPro": "设置Learner需要的GPU资源(必须为整数)", + "memoryPro": "设置Learner需要的内存资源(必须为整数)", + "imagePro": "请选择镜像列", + "localPathPro": "请输入宿主机目录", + "dataSubdirectoryPro": "设置模型训练读取的数据目录,全路径为:训练数据存储根目录/训练数据子目录,环境变量为:${DATA_DIR}", + "resultSubdirectoryPro": "设置模型训练结果的数据目录,全路径为:训练数据存储根目录/训练结果子目录,环境变量为:${RESULT_DIR}", + "entrancePro": "设置模型训练任务的启动命令,例如:python3 ${MODEL_DIR}/tf-model/convolutional_network.py", + "viewlog": "日志", + "trainingjobNameReq": "训练任务名称不能为空", "imageTypeReq": "镜像类型不能为空", "imageColumnReq": "镜像列不能为空", "CPUReq": "CPU不能为空", "GPUReq": "GPU不能为空", "memoryReq": "内存不能为空", + "dataDirectoryReq": "数据子目录不能为空", + "resultDirectoryReq": "结果子目录不能为空", + "localPathReq": "宿主机目录不能为空", + "entranceReq": "执行入口不能为空", + "descriptionReq": "描述不能为空", + "autoRefresh": "自动刷新", + "manuallyRefresh": "手动刷新", + "setRefreshTime": "设置刷新时间", + "refreshTime": "刷新时间", + "refreshTimePro": "请选择刷新时间", + "refreshTimeReq": "刷新时间不能为空", + "block": "块", + "trainingDataStore": "训练数据存储根目录", + "trainingDataStorePro": "设置训练任务的数据读写路径根路径,例如:/data/bdap-ss/mlss-data/hduser05/mlss", + "trainingjobNameFormat": "训练任务名称只能输入数字、字母、横杠", + "GPUFormat": "GPU只能输入整数", + "dataDirectoryFormat": "训练数据子目录只能输入数字、字母、下划线、横杠、斜杠", + "resultDirectoryFormat": "训练结果子目录只能输入数字、字母、下划线、横杠、斜杠", + "trainingDataStoreFormat": "训练数据存储根目录以/开头,只能输入数字、字母、下划线、横杠、斜杠", + "trainingDataStoreReq": 
"训练数据存储根目录不能为空", + "imageInputReq": "镜像不能为空", + "imageInputPro": "请输入镜像", "imageInputFormat": "镜像只能输入数字、字母、下划线、横杠、点", + "taskDetail": "查看", + "mountPathCheck": "容器内目录不能相同", + "alarmSet": "任务告警设置", + "alarmTypes": "告警类型", + "alarmTypesPro": "请选择告警类型", + "eventAlarm": "事件告警", + "timeoutAlarm": "超时告警", + "eventAlarmSettings": "事件告警设置", + "listeningEnent": "监听事件", + "listeningEnentPro": "请选择监听事件", + "executeSuccess": "执行成功", + "executeFail": "执行失败", + "alarmLevel": "告警级别", + "alarmLevelPro": "请选择告警级别", + "timeoutAlarmSetting": "超时告警设置", + "timeoutTypeSetting": "超时类型设置", + "timeoutTypePro": "请选择超时类型", + "designatedTimeout": "定点超时", + "hoursOvertime": "超时时长", + "designatedTimeoutSetting": "定点超时设置", + "designatedTimeoutPro": "请选择定点超时时间", + "timeoutTimeSetting": "超时时长设置", + "timeoutTimePro": "请输入超时时长", + "timeoutTimeFormat": "超时时长大于零且为整数", + "alarmFrequency": "告警频率", + "alarmFrequencyPro": "请输入告警频率", + "alarmFrequencyFormat": "告警频率大于零且为整数", + "alarmTypesReq": "告警类型不能为空", + "listeningEnentReq": "监听事件不能为空", + "alarmLevelReq": "告警级别不能为空", + "timeoutTypeReq": "超时类型不能为空", + "designatedTimeoutReq": "定点超时不能为空", + "timeoutTimeReq": "超时时长不能为空", + "alarmFrequencyReq": "告警频率不能为空", + "receiver": "告警接收人", + "receiverPro": "请输入告警接收人", + "receiverReq": "告警接收人不能为空", + "alarmDetail": "告警详情", + "operationList": "操作列表", + "executeCodeSettings": "执行代码设置", + "manualUpload": "手动上传", + "shareDirectory": "训练代码子目录", + "executeCodeSettingsReq": "执行代码设置不能为空", + "shareDirectoryReq": "训练代码子目录不能为空", + "shareDirectoryFormat": "训练代码子目录只能输入数字、字母、下划线、横岗、斜杠", + "shareDirectoryPro": "设置模型训练的代码存储子目录,全部路径为:存储数据存储根目录/训练代码子目录,环境变量为:${MODEL_DIR}", + "executeCodeSettingsPro": "请设置执行代码", + "uploadModeSettings": "上传方式设置", + "uploadModeSettingsReq": "上传方式设置不能为空", + "uploadModeSettingsPro": "请选择上传方式", + "uploadModenotFile": "执行入口设置上传文件为空", + "learners": "计算任务数", + "pss": "参数服务器数", + "learnersPro": "请输入计算任务数", + "pssPro": "请输入参数服务器数", + "learnersReq": "计算任务数不能为空", + "pssReq": "参数服务器数不能为空", + "learnersFormat": "计算任务数只能输入整数", + "pssFormat": "参数服务器数只能输入整数", + "ps_cpu": "参数服务器CPU", + "ps_memory": "参数服务器内存", + "ps_cpuPro": "设置Learner需要的PS CPU资源(例如:1 或 2.1)", + "ps_memoryPro": "设置Learner需要的PS Memory资源(例如:1 或 2.1)", + "ps_image": "参数服务器镜像", + "ps_imageType": "参数服务器镜像类型", + "jobType": "任务类型", + "jobTypePro": "请选择任务类型", + "jobTypeReq": "任务类型不能为空", + "Single": "单机", + "distributed": "分布式", + "YARNQueue": "YARN 队列配置", + "queuePro": "请输入队列配置(例如:root.default)", "queueReq": "队列不能为空", "queueFormat": "队列只能输入数据、字母、下划线、点", + "queue": "队列", + "pythonSetting": "Python版本设置", + "pythonType": "Python类型", + "pythonTypeReq": "Python类型不能为空", + "pythonSelect": "Python选择", + "pythonSelectPro": "请选择Python版本", + "pythonSelectReq": "Python版本不能为空", + "HDFSPath": "HDFS 目录", + "HDFSPathPro": "请输入HDFS 目录", + "HDFSPathReq": "HDFS 目录不能为空", + "HDFSPathFormat": "HDFS 目录只能输入数字、字母、下划线、横岗、斜杠、点、:", + "driverMemory": "Driver Memory", + "driverMemoryPro": "设置Driver所需的内存资源", + "driverMemoryReq": "Driver Memory不能为空", "driverMemoryFormat": "Driver Memory大于零且为整数", + "executorInstances": "Executor Cores", + "executorInstancesPro": "设置各个Executor并发线程数目(必须为整数)", + "executorInstancesReq": "Executor Cores不能为空", "executorInstancesFormat": "Executor Cores大于零且为整数", + "executorMemory": "Executor Memory", + "executorMemoryPro": "设置Executor所需的内存资源", + "executorMemoryReq": "Executor Memory不能为空", "executorMemoryFormat": "Executor Memory大于零且为整数", - "localPathReq": "宿主机目录不能为空", - "descriptionReq": "描述不能为空", - "block": "块", - "description": "描述" + "linkisInstance": "Executors", + "linkisInstancePro": 
"设置任务所需的Executor数量(必须为整数)", + "linkisInstanceReq": "Executors不能为空", + "linkisInstanceFormat": "Executors大于零且为整数", + "TrainingOrDataSetting": "训练任务目录设置或数据上传", + "pyfiles": "项目打包", + "mainCode": "main脚本", + "dataFiles": "数据文件", + "executionEnvironmen": "执行环境设置", + "taskExecution": "任务执行设置", + "codeSettings": "代码文件设置", + "codeSettingsReq": "代码文件设置不能为空", + "dateSettings": "数据文件设置", + "executionEntrySettings": "执行入口设置", + "executionEntrySettingsReq": "执行入口设置不能为空", + "createMlFlow": "创建ML FLOW任务", + "nolog": "任务申请资源中,暂无日志。", + "taskExecutionID": "任务执行ID", + "taskName": "任务名称", + "experimentExecutionID": "实验执行ID", + "experimentName": "实验名称", + "submissionTime": "提交时间", + "endTime": "结束时间", + "committer": "提交者", + "executionStatus": "执行状态", + "termination": "终止", + "terminateJobPro": "确定要终止该任务吗?", + "enterCodeDirectory": "进入代码目录" }, "AIDE": { "individualDevEnvironment": "个人开发环境", @@ -333,8 +497,9 @@ }, "en": { "languageSwitching": "Language switching", - "trainingJob": "Training Job", + "trainingJob": "Model Training", "Notebook": "MLLabis", + "Notebook1": "Notebooks", "management": "Management", "basisPlatform": "Basic Platform", "userManager": "User", @@ -358,6 +523,9 @@ "home": "Dashboard", "title": "Welcome to WeDataSphere machine learning platform", "introduction": "Prophecis provides various ways for model trainning and debugging, integrates self-developed machine learning algorithms as well as multiple open source frameworks, and has multi-tenant mangement capabilities for heterogeneous clusters of high-performance.", + "jobList": "Experiment List", + "totalJob": "Total number of jobs", + "runjob": "Number of running jobs", "cardNumber": "Occupied GPUs", "totalInstance": "Instances", "runInstance": "Active Instances", @@ -392,47 +560,205 @@ "networkException": "WebService Exception", "urlPro": "Please pass the URL", "lastStep": "Last Step", - "nextStep": "Next Step" + "nextStep": "Next Step", + "export": "Export" }, "DI": { - "name": "Name", + "distributedModeling": "Modeling Experimental", + "createGPU": "Create a GPU job", + "createHadoop": "Create a hadoop job", + "modelId": "ModelID", + "jobName": "Job name ", + "cpu": "Worker CPU", + "gpu": "Worker GPU", "image": "Image", + "dataStorage": "Data storage", + "resultStorage": "Result storage", "status": "Status", "createTime": "Create time", + "createTraining": "Create a training job", "basicSettings": "Basic settings", + "trainingjobName": "Training job name", + "description": "Desciption", + "computingResource": "Computing resources settings", + "memory": "Worker memory", "imageSettings": "Image settings", - "imageType": "Type", + "imageType": "Image type", "standard": "Standard", "custom": "Custom", - "imageSelection": "Name", - "imagePro": "Please select a image column", - "imageInputPro": "Please input image tags", - "computingResource": "Resource settings", - "YARNQueue": "Queue setting", - "queuePro": "Please input queue configuration (e.g. 
root.default)", - "driverMemory": "Driver Memory", - "driverMemoryPro": "Set the memory resources for driver", - "executorInstances": "Executor Cores", - "executorInstancesPro": "Set the number of cores for executor", - "executorMemory": "Executor Memory", - "executorMemoryPro": "Set the memory resources for executor", - "linkisInstance": "Executors", - "linkisInstancePro": "Set the number of executors to launch", + "imageSelection": "Image selection", + "trainingDirectory": "Training job directory settings", "localPath": "Host path", + "trainingData": "Training data subdirectory", + "trainingResult": "Training result subdirectory", + "jobExecution": "Job execution settings", + "entrance": "Execution entrance", + "bucket": "Bucket", + "name": "Name", + "path": "Path", + "type": "Type", + "trainingjobNamePro": "Please input the name of training job", + "jobDescriptionPro": "Please input the job description", + "CPUPro": "Set the CPU resources for Learner (e.g. 1 or 2.1)", + "GPUPro": "Set the GPU resources for Learner (Must be Integers)", + "memoryPro": "Set the memory resources for Learner (Must be Integers)", + "imagePro": "Please select a image column", + "localPathPro": "Please input local path directory", + "dataSubdirectoryPro": "Set data subdirectory for training on nodes, full path: /, environment variable in container: ${DATA_DIR}", + "resultSubdirectoryPro": "Set result subdirectory for training on nodes, full path: /, environment variable in container: ${RESULT_DIR}", + "entrancePro": "Set the start command of the model training tasks (e.g: python3 ${MODEL_DIR}/tf-model/convolutional_network.py)", + "viewlog": "Log", + "trainingjobNameReq": "Training job name cannot be empty", "imageTypeReq": "Image type cannot be empty", "imageColumnReq": "Image column cannot be empty", "CPUReq": "CPU cannot be empty", "GPUReq": "GPU cannot be empty", "memoryReq": "Memory cannot be empty", + "dataDirectoryReq": "Data subdirectory cannot be empty", + "resultDirectoryReq": "Result subdirectory cannot be empty", + "localPathReq": "Host path cannot be empty", + "entranceReq": "Entrance cannot be empty", + "descriptionReq": "Desciption cannot be empty", + "autoRefresh": "Auto refresh", + "manuallyRefresh": "Manually refresh", + "setRefreshTime": "Set refresh interval", + "refreshTime": "Refresh interval", + "refreshTimePro": "Please select refresh interval", + "refreshTimeReq": "Refresh interval cannot be be empty", + "block": "Block", + "trainingDataStore": "Training root directory", + "trainingDataStorePro": "Set the root path on nodes of the training tasks (e.g. 
:/data/bdap-ss/mlss-data/hduser05/mlss)", + "trainingjobNameFormat": "Traning job name can only include digits,letters and hyphens", + "GPUFormat": "GPU must be an integer", + "dataDirectoryFormat": "The subdirectory of the training data may only contain digits, letters, underscores, hyphens and slashs", + "resultDirectoryFormat": "The subdirectory of the training results may only contain digits, letters, underscores, hyphens and slashs", + "trainingDataStoreFormat": "The root directory of the training data starts with / and may only contain digits, letters, underscores, hyphens and slashs", + "trainingDataStoreReq": "The root directory of the training data maynot be empty", + "imageInputReq": "Image tags cannot be empty", + "imageInputPro": "Please input image tags", "imageInputFormat": "Image tags may only contain digits, letters, dots, underscores and hyphens", + "taskDetail": "View", + "alarmSet": "Task alert settings", + "alarmTypes": "Alert types", + "alarmTypesPro": "Please select a alert type", + "eventAlarm": "Event alert", + "timeoutAlarm": "Timeout alert", + "eventAlarmSettings": "Event alert settings", + "listeningEnent": "Listening event", + "listeningEnentPro": "Please select a listening event", + "executeSuccess": "Successfully executed", + "executeFail": "Execute failed", + "alarmLevel": "Alert level", + "alarmLevelPro": "Please select an alert level", + "timeoutAlarmSetting": "Timeout alert settings", + "timeoutTypeSetting": "Timeout type settings", + "timeoutTypePro": "Please select a timeout tpye", + "designatedTimeout": "Endpoint timeout", + "hoursOvertime": "Timeout", + "designatedTimeoutSetting": "Endpoint timeout settings", + "designatedTimeoutPro": "Please select a endpoint", + "timeoutTimeSetting": "Timeout settings", + "timeoutTimePro": "Please set the timeout", + "timeoutTimeFormat": "Timeout should be a integer that greater than one", + "alarmFrequency": "Alert frequency", + "alarmFrequencyPro": "Please input alert frequency", + "alarmFrequencyFormat": "Alert frequency should be a integer that greater than one", + "alarmTypesReq": "Alert type may not be empty", + "listeningEnentReq": "Listening event may not be empty", + "alarmLevelReq": "Alert level may not be empty", + "timeoutTypeReq": "Timeout type may not be empty", + "designatedTimeoutReq": "Endpoint timeout may not be empty", + "timeoutTimeReq": "Timeout may not be empty", + "alarmFrequencyReq": "Alert frequency may not be empty", + "receiver": "Alert receiver", + "receiverPro": "Please input alert receiver", + "receiverReq": "Alert receiver may not be empty", + "alarmDetail": "Details of task ", + "operationList": "Operation list", + "executeCodeSettings": "Execute code settings", + "manualUpload": "Manual upload", + "getAIDE": "Get AIDE", + "shareDirectory": "Training code subdirectory", + "executeCodeSettingsReq": "Execute code settings cannot be empty", + "shareDirectoryReq": "Training code subdirectory cannot be empty", + "shareDirectoryFormat": "Training code subdirectory may only contain digits, letters, underscores, hyphens and slashs", + "shareDirectoryPro": "Set code subdirectory of model training, the full path is: /, environment variable in container: ${MODEL_DIR}", + "executeCodeSettingsPro": "Please set the execution code", + "uploadModeSettings": "Upload mode settings", + "uploadModeSettingsReq": "Upload mode settings cannot be empty", + "uploadModeSettingsPro": "Please select upload mode", + "uploadModenotFile": "The upload file of execution entry setting is empty", + "learners": "Worker", 
+ "pss": "Parameter server", + "ps_cpu": "PS CPU", + "ps_memory": "PS Memory", + "learnersPro": "Please input worker", + "pssPro": "Please input parameter server", + "learnersReq": "Worker cannot be empty", + "pssReq": "Parameter server cannot be empty", + "learnersFormat": "Worker must be an integer", + "pssFormat": "Parameter server must be an integer", + "ps_cpuPro": "Set the PS CPU resources for Learner(e.g. 1 or 2.1)", + "ps_memoryPro": "Set the PS Memory resources for Learner(Must be Integers)", + "ps_image": "PS Image", + "ps_imageType": "PS Image Type", + "jobType": "Job type", + "jobTypePro": "Please select job type", + "jobTypeReq": "Job type cannot be empty", + "Single": "Single", + "distributed": "Distributed", + "YARNQueue": "YARN queue setting", + "queuePro": "Please input queue configuration (e.g. root.default)", "queueReq": "Queue cannot be empty", "queueFormat": "Queue configuration may only contain digits, letters, underscores and dots", + "queue": "Queue", + "pythonSetting": "Python version setting", + "pythonType": "Python type", + "pythonTypeReq": "Python type cannot be empty", + "pythonSelect": "Python select", + "pythonSelectPro": "Please select python version", + "pythonSelectReq": "Python version cannot be empty", + "HDFSPath": "HDFS path", + "HDFSPathPro": "Please input HDFS path", + "HDFSPathReq": "HDFS path cannot be empty", + "HDFSPathFormat": "HDFS path may only contain digits, letters, underscores, hyphens, slashs, dots and :", + "driverMemory": "Driver Memory", + "driverMemoryPro": "Set the memory resources for driver", + "driverMemoryReq": "Driver memory cannot be empty", "driverMemoryFormat": "Driver memory should be a integer that greater than one", + "executorInstances": "Executor Cores", + "executorInstancesPro": "Set the number of cores for executor", + "executorInstancesReq": "Executor cores cannot be empty", "executorInstancesFormat": "Executor cores should be a integer that greater than one", + "executorMemory": "Executor Memory", + "executorMemoryPro": "Set the memory resources for executor", + "executorMemoryReq": "Executor memory cannot be empty", "executorMemoryFormat": "Executor memory should be a integer that greater than one", - "localPathReq": "Host path cannot be empty", - "description": "Desciption", - "block": "Block" + "linkisInstance": "Executors", + "linkisInstancePro": "Set the number of executors to launch", + "linkisInstanceReq": "Executors cannot be empty", + "linkisInstanceFormat": "Executors should be a integer that greater than one", + "TrainingOrDataSetting": "Training directory settings or data upload", + "taskExecution": "Task execution settings", + "executionEnvironmen": "Execution environment settings", + "codeSettings": "Code file settings", + "codeSettingsReq": "Code file settings cannot be empty", + "dateSettings": "Data file settings", + "executionEntrySettings": "Execute entry settings", + "executionEntrySettingsReq": "Execute entry settings cannot be empty", + "createMlFlow": "Create ML FLOW job", + "nolog": "There is no log in the task request resource.", + "taskExecutionID": "Task ID", + "taskName": "Task Name", + "experimentExecutionID": "Experiment Execution ID", + "experimentName": "Experiment name", + "submissionTime": "Submission time", + "endTime": "End time", + "committer": "Owner", + "executionStatus": "Status", + "termination": "Termination", + "terminateJobPro": "Are you sure you want to terminate this job?", + "enterCodeDirectory": "Enter the code directory" }, "AIDE": { "individualDevEnvironment": 
"Individual development environment", diff --git a/ui/public/static/env-config.js b/ui/public/static/env-config.js index 85fa2af..bf43616 100644 --- a/ui/public/static/env-config.js +++ b/ui/public/static/env-config.js @@ -1,8 +1,15 @@ // 动态变量配置,该配置文件用于本地联调,线上环境后台配置文件会覆盖本文件,因此配置文件同步后台配置文件 var mlssEnvConfig = { 'development': { + 'DI': { + 'defineImage': 'uat.sf.dockerhub.stgwebank/wedatasphere/prophecis', // 镜像基本路径 + 'imageOption': ['tensorflow-1.5.0-py3', 'tensorflow-1.5.0-gpu-py3-wml-v1'], + 'definePython': '', + 'pythonOption': [ + ] + }, 'AIDE': { - 'defineImage': 'uat.sf.dockerhub.stgwebank/webank/mlss-di', // 镜像基本路径 + 'defineImage': 'uat.sf.dockerhub.stgwebank/wedatasphere/prophecis', // 镜像基本路径 'imageOption': ['tensorflow-1.12.0-notebook-gpu-v0.4.0', 'tensorflow-1.12.0-notebook-gpu-v0.4.0-wml-v1'] }, 'basisPlatform': { @@ -15,8 +22,15 @@ var mlssEnvConfig = { 'ccApiVersion': 'v1' // cc接口模块版本号 }, 'production': { + 'DI': { + 'defineImage': 'uat.sf.dockerhub.stgwebank/wedatasphere/prophecis', // 镜像基本路径 + 'imageOption': ['tensorflow-1.5.0-py3', 'tensorflow-1.5.0-gpu-py3-wml-v1'], + 'definePython': '', + 'pythonOption': [ + ] + }, 'AIDE': { - 'defineImage': 'uat.sf.dockerhub.stgwebank/webank/mlss-di', // 镜像基本路径 + 'defineImage': 'uat.sf.dockerhub.stgwebank/wedatasphere/prophecis', // 镜像基本路径 'imageOption': ['tensorflow-1.12.0-notebook-gpu-v0.4.0', 'tensorflow-1.12.0-notebook-gpu-v0.4.0-wml-v1'] }, 'basisPlatform': { diff --git a/ui/src/components/AlarmSetting.vue b/ui/src/components/AlarmSetting.vue new file mode 100644 index 0000000..7694e24 --- /dev/null +++ b/ui/src/components/AlarmSetting.vue @@ -0,0 +1,400 @@ + + + diff --git a/ui/src/components/CreateGPU.vue b/ui/src/components/CreateGPU.vue new file mode 100644 index 0000000..e8a625d --- /dev/null +++ b/ui/src/components/CreateGPU.vue @@ -0,0 +1,478 @@ + + + diff --git a/ui/src/components/StoragePathAide.vue b/ui/src/components/StoragePathAide.vue new file mode 100644 index 0000000..2faea11 --- /dev/null +++ b/ui/src/components/StoragePathAide.vue @@ -0,0 +1,199 @@ + + + diff --git a/ui/src/components/Upload.vue b/ui/src/components/Upload.vue new file mode 100644 index 0000000..276f1e6 --- /dev/null +++ b/ui/src/components/Upload.vue @@ -0,0 +1,159 @@ + + + diff --git a/ui/src/components/sidebarConfig.js b/ui/src/components/sidebarConfig.js index 2ec09c4..59a405b 100644 --- a/ui/src/components/sidebarConfig.js +++ b/ui/src/components/sidebarConfig.js @@ -6,6 +6,11 @@ export default { title: 'home.home', icon: 'icon-shouye' }, + { + index: '/DI', + title: 'trainingJob', + icon: 'icon-moxingshu' + }, { index: '/AIDE', title: 'Notebook', diff --git a/ui/src/router/index.js b/ui/src/router/index.js index 9b4061f..2cdd1c5 100644 --- a/ui/src/router/index.js +++ b/ui/src/router/index.js @@ -43,6 +43,26 @@ const routes = [ component: () => import('../views/Home.vue') }, + { + path: '/DI', + name: 'DI', + meta: { + login: true + }, + component: () => + import('../views/DI/index.vue') + }, + { + path: '/DI/log', + name: 'DILogDetail', + meta: { + login: true, + groupName: 'DI', + title: 'DI.viewlog' + }, + component: () => + import('../views/DI/DetailLog.vue') + }, { path: '/AIDE', name: 'AIDE', diff --git a/ui/src/util/handleDIDetailMixin.js b/ui/src/util/handleDIDetailMixin.js new file mode 100644 index 0000000..2394bb5 --- /dev/null +++ b/ui/src/util/handleDIDetailMixin.js @@ -0,0 +1,144 @@ +import util from './common' +export default { + methods: { + basicInfoProcess (item) { + let keyArry = ['model_id', 'name', 'user_id', 'description'] 
+ let modelsItem = {} + modelsItem.job_type = item.JobType + modelsItem.image = item.framework.version + modelsItem.status = item.training.training_status.status + modelsItem.namespace = item.job_namespace + modelsItem.submission_timestamp = util.transDate(item.submission_timestamp) + modelsItem.completed_timestamp = util.transDate(item.completed_timestamp) + if (item.data_stores) { + modelsItem.trainingData = item.data_stores[0].connection.bucket + modelsItem.trainingResults = item.data_stores[1].connection.bucket + // modelsItem.data_stores = item.data_stores + modelsItem.path = item.data_stores[1].connection.path + modelsItem.data_stores = item.data_stores + // data_stores 如果存在第三个对象 执行代码为共享目录 + if (item.data_stores[2]) { + modelsItem.codeSettings = 'storagePath' + modelsItem.diStoragePath = item.data_stores[2].connection.codeWork + } else { + modelsItem.codeSettings = 'codeFile' + modelsItem.fileName = item.fileName + modelsItem.filePath = item.filePath + } + } + if (item.JobAlert && item.JobAlert !== 'null') { + modelsItem.job_alert = this.processAlertInfo(item.JobAlert) + } + if (item.JobType === 'tfos') { + this.handleHadoopData(modelsItem, item) + } else { + this.handleGPUData(modelsItem, item) + } + for (let key of keyArry) { + modelsItem[key] = item[key] + } + return modelsItem + }, + handleGPUData (modelsItem, trdata) { + modelsItem.gpus = trdata.training.gpus || 0 + const keyArry = ['cpus', 'memory', 'command', 'learners', 'ps_cpu', 'ps_image', 'ps_memory', 'pss'] + for (let key of keyArry) { + if (key === 'cpus' || key === 'command' || key === 'memory' || key === 'learners') { + modelsItem[key] = trdata.training[key] + } else { + modelsItem[key] = trdata[key] + } + } + }, + // hadoop数据提取 + handleHadoopData (modelsItem, trdata) { + const taskBaseObj = { + codeSettings: 'HDFSPath', + HDFSPath: '', + fileName: '' + } + const tfos = trdata.TFosRequest + modelsItem.driver_memory = tfos.DriverMemory + modelsItem.executor_cores = tfos.ExecutorCores + modelsItem.executor_memory = tfos.ExecutorMemory + modelsItem.queue = tfos.Queue + modelsItem.executors = tfos.Executors + modelsItem.command = tfos.Command + // const keyArry = ['Queue', 'Executors', 'Command'] + // for (let key of keyArry) { + // modelsItem[key] = tfos[key] + // } + modelsItem.pyfile = tfos.py_file + modelsItem.archives = tfos.Archives + taskBaseObj.HDFSPath = tfos.EntryPoint.hdfs + modelsItem.entrypoint = { ...taskBaseObj } + let pythonUrl = tfos.TensorflowEnv.hdfs + let curPyth = this.FesEnv.DI.pythonOption.find((x) => x.value.indexOf(pythonUrl) > -1) + if (curPyth !== undefined) { + modelsItem.pythonType = 'Standard' + modelsItem.pythonOption = curPyth.value + } else { + modelsItem.pythonType = 'Custom' + modelsItem.pythonInput = pythonUrl + } + }, + // 列表返回告警详情数据重新组装,由于后台告警信息直接存的是字符串 + processAlertInfo (alert, isFormat = true) { + let alertObj = isFormat ? 
window.JSON.parse(alert) : alert + let arr = [] + let jobAlert = [] + alertObj.deadline && arr.push(alertObj.deadline.length) + alertObj.event && arr.push(alertObj.event.length) + alertObj.overtime && arr.push(alertObj.overtime.length) + arr.sort() + for (let i = 0; i < arr[arr.length - 1]; i++) { + let obj = { + alarmType: [] + } + if (alertObj.event && alertObj.event[i]) { + obj.alarmType.push('1') + obj.event = alertObj.event[i] + } + if (alertObj.deadline && alertObj.deadline[i]) { + obj.alarmType.push('2') + obj.fixTime = alertObj.deadline[i] + } + if (alertObj.overtime && alertObj.overtime[i]) { + obj.alarmType.push('3') + obj.overTime = alertObj.overtime[i] + } + jobAlert.push(obj) + } + console.log('jobAlert', jobAlert) + return jobAlert + }, + handelTfosCopy (trData) { + const tfosKey = ['pyfile', 'archives'] + for (let item of tfosKey) { + this.handleHadoopCopy(item, trData) + } + }, + handleHadoopCopy (moduleKey, trData) { + const taskBaseObj = { + codeSettings: 'HDFSPath', + HDFSPath: '', + fileName: '' + } + let moduleArr = [] + if (trData[moduleKey] && trData[moduleKey].length > 0) { + for (let j = 0; j < trData[moduleKey].length; j++) { + if (j > 0) { + this.$refs.hadoopDialog.hdpAddExecutorsValidate(moduleKey, j) // 数据文件设置表单校验规则动态增加 + } + let item = { ...taskBaseObj } + item.HDFSPath = trData[moduleKey][j].hdfs + moduleArr.push(item) + } + } else { + moduleArr.push({ ...taskBaseObj }) + } + trData[moduleKey] = moduleArr + // this.$refs.hadoopDialog.form[moduleKey] = moduleArr + } + } +} diff --git a/ui/src/util/handleDIJobMixin.js b/ui/src/util/handleDIJobMixin.js new file mode 100644 index 0000000..11b6319 --- /dev/null +++ b/ui/src/util/handleDIJobMixin.js @@ -0,0 +1,276 @@ +import util from './common' +export default { + methods: { + // gpu函数 + gpuBeforeUpload (file) { + const fileTpye = file.type.indexOf('zip') > -1 + const isLt512M = file.size / 1024 / 1024 < 512 + const limit = this.files === '' + if (!fileTpye) { + this.$message.error(this.$t('common.fileFormat')) + } + if (!isLt512M) { + this.$message.error(this.$t('common.fileSize')) + } + if (!limit) { + this.$message.error(this.$t('common.fileLimit')) + return false + } + return fileTpye && isLt512M + }, + gpuDeleteFile () { + this.files = '' + this.fileName = '' + }, + gpuUploadFile (files) { + if (files.file) { + this.fileName = files.file.name + this.files = files.file + } + }, + handleGPUSubParam () { + let param = { + name: this.form.name, + description: this.form.description, + version: '1.0', + gpus: +this.form.gpus, + cpus: +this.form.cpus, + memory: this.form.memory + 'Gb', + namespace: this.form.namespace, + code_selector: this.form.codeSettings, + job_type: this.form.job_type, + data_stores: [{ + id: 'hostmount', + type: 'mount_volume', + training_data: { + container: this.form.trainingData + }, + training_results: { + container: this.form.trainingResults + }, + connection: { + type: 'host_mount', + name: 'host-mount', + path: this.form.path + } + }], + framework: { + name: this.FesEnv.DI.defineImage, + version: this.form.imageType === 'Standard' ? 
this.form.imageOption : this.form.imageInput, + command: this.form.command + }, + evaluation_metrics: { + type: 'fluent-bit' + } + } + return param + }, + // hadoop 函数 + hdpTaskBaseObj () { + return { + codeSettings: 'codeFile', + HDFSPath: '', + fileName: '' + } + }, + hdpAddExecutorsTask (objKey) { + const baseObj = this.hdpTaskBaseObj() + const leng = this.form[objKey].length + if (leng >= 1) { + this.hdpAddExecutorsValidate(objKey, leng) + } + this.form[objKey].push(baseObj) + }, + hdpDeleteExecutorsTask (objKey) { + const leng = this.form[objKey].length + this.form[objKey].splice(leng - 1, 1) + if (leng > 1) { + this.hdpDeleteExecutorsValidate(objKey, leng) + } + }, + hdpAddExecutorsValidate (objKey, index) { + this.ruleValidate[`${objKey}[${index}].HDFSPath`] = this.ruleValidate[`${objKey}[0].HDFSPath`] + }, + hdpDeleteExecutorsValidate (objKey, index) { + delete this.ruleValidate[`${objKey}[${index - 1}].HDFSPath`] + }, + // changeCodeSettings (objKey, index) { + // let modules = {} + // modules = this.form[objKey][index] + // if (this.form[objKey].codeSettings === 'codeFile') { + // modules.HDFSPath = '' + // } else { + // modules.fileName = '' + // } + // }, + hdpSubmitUpload (objKey, index) { + const key = `${objKey}Upload${index}` + this.$refs[key][0].submit() + this.currentUplod.uploadModule = objKey + this.currentUplod.index = index + }, + hdpBeforeUpload (file) { + const fileTpye = file.type.indexOf('zip') > -1 + const isLt512M = file.size / 1024 / 1024 < 512 + const currentUplod = this.currentUplod.uploadModule + let limit = '' + const currentIndex = this.currentUplod.index + limit = this.form[currentUplod][currentIndex].fileName === '' + if (!fileTpye) { + this.$message.error(this.$t('common.fileFormat')) + } + if (!isLt512M) { + this.$message.error(this.$t('common.fileSize')) + } + if (!limit) { + this.$message.error(this.$t('common.fileLimit')) + return false + } + return fileTpye && isLt512M + }, + hdpDeleteFile (objKey, index) { + this.form[objKey][index].fileName = '' + }, + hadUploadFile (response, files) { + if (response.data) { + const currentUplod = this.currentUplod.uploadModule + const currentIndex = this.currentUplod.index + this.form[currentUplod][currentIndex].fileName = files.raw.name + } + }, + handleHdpSubParam () { + let param = { + version: '1.0', + job_type: 'tfos', + code_selector: 'storagePath', + framework: { + name: 'uat.sf.dockerhub.stgwebank/wedatasphere/prophecis', + version: 'tfosexecutor-1.6.0' + }, + evaluation_metrics: { + type: 'fluent-bit' + } + } + const arr = ['name', 'namespace', 'description'] + for (let item of arr) { + param[item] = this.form[item] + } + this.handelHdpTfos(param) + const tfosArr = ['queue', 'executor_cores', 'executor_memory', 'driver_memory', 'executors', 'command'] + for (let key of tfosArr) { + param.tfos_request[key] = this.form[key] + } + return param + }, + handelHdpTfos (param) { + let arr = ['pyfile', 'archives'] + param.tfos_request = { + pyfile: [], + archives: [] + } + const time = util.getTime() + const definePython = this.FesEnv.DI.definePython + for (let key of arr) { + for (let item of this.form[key]) { + if (item.codeSettings === 'codeFile' && item.fileName) { + let hdfs = `${definePython}/${this.userId}/${time}/${item.fileName}` + param.tfos_request[key].push({ hdfs: hdfs }) + } else if (item.codeSettings === 'HDFSPath' && item.HDFSPath) { + param.tfos_request[key].push({ hdfs: item.HDFSPath }) + } + } + } + let envHDFSPath = this.form.pythonType === 'Standard' ? 
this.form.pythonOption : this.form.pythonInput + param.tfos_request.tensorflow_env = { hdfs: envHDFSPath } + }, + validateForm () { + let isValid = false + this.$refs.formValidate.validate((valid) => { + isValid = valid + }) + return isValid + }, + validateFormChangeTab (keyArr, errObj, activeName) { // keyArr tab对应表单下所有字段,errObj 校验不通过返回的不通过字段,activeName 当前tab index + let tabActiveArr = ['0', '1', '2'] // tab index总数组成数组 + let currentTab = false // 判断是否当前tab有字段未填 + const _this = this + let isEmit = false // 是否切换过tab + judgmentTba(keyArr[activeName], activeName) + if (!currentTab) { + tabActiveArr.splice(activeName, 1) // 如果不是当前tab表单字段不通过则循环其他tab字段 + for (let index of tabActiveArr) { + if (!isEmit) { + judgmentTba(keyArr[index], index) + } + } + } + function judgmentTba (formLabel, index) { // formLabel-> tab下表单字段; index-> formLabel对应哪个tab index + for (let item of formLabel) { + if (errObj[item]) { + if (index !== activeName) { + _this.$emit('changeTab', index) + isEmit = true + } else { + currentTab = true + } + return + } + } + } + }, + mfflowBasicData () { + return { + method: '/api/rest_j/v1/entrance/execute', + params: { + ariable: { + k1: 'v1' + }, + configuration: { + special: { + k2: 'v2' + }, + runtime: { + 'k3': 'v3' + }, + startup: { + k4: 'v4' + } + } + }, + executeApplicationName: 'spark', + runType: 'sql', + source: { + scriptPath: '/home/Linkis/Linkis.sql' + } + } + }, + handleOriginNodeData (val, initForm) { + this.currentNode = val + if (this.currentNode.jobContent && this.currentNode.jobContent.ManiFest) { + const form = this.currentNode.jobContent.ManiFest + // let newFormData = this.basicInfoProcess(form) + // this.handelTfosCopy(newFormData) + // Object.assign(this.form, newFormData) + if (form.name) { + if (form.job_alert) { + this.alarmData = form.job_alert + } + switch (this.currentNode.type) { + case 'linkis.appjoint.mlflow.gpu': + this.handleGpuNodeData(form) + break + case 'linkis.appjoint.mlflow.hadoop': + this.handleHadoopNodeData(form) + break + } + } + } else { + this.form = JSON.parse(JSON.stringify(initForm)) + this.form.name = this.currentNode.title + setTimeout(() => { + this.$refs.formValidate.clearValidate() + }, 10) + } + } + } +} diff --git a/ui/src/views/AIDE/index.vue b/ui/src/views/AIDE/index.vue index 89d3236..5286bc7 100644 --- a/ui/src/views/AIDE/index.vue +++ b/ui/src/views/AIDE/index.vue @@ -1,106 +1,56 @@ diff --git a/ui/src/views/Login.vue b/ui/src/views/Login.vue index 5846e60..f883a89 100644 --- a/ui/src/views/Login.vue +++ b/ui/src/views/Login.vue @@ -1,6 +1,5 @@
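For reference, a minimal sketch of the alert-payload round trip handled by processAlertInfo in handleDIDetailMixin.js: the event / deadline / overtime array layout and the '1' / '2' / '3' alarmType codes come from the mixin above, while the concrete field names and values below are illustrative assumptions, not the real backend schema.

// Hypothetical JobAlert string as persisted by the backend: up to three parallel
// arrays (event, deadline, overtime), one entry per configured alert rule.
const jobAlertString = JSON.stringify({
  event: [{ alarm_level: 'major', receiver: 'hduser05' }],              // assumed fields
  overtime: [{ alarm_level: 'minor', receiver: 'hduser05', hours: 2 }]  // assumed fields
})

// processAlertInfo(jobAlertString) merges the i-th entry of each array into one row
// and records which alert kinds are present, producing roughly:
// [{
//   alarmType: ['1', '3'],                                  // '1' event, '2' fixed-time, '3' timeout
//   event: { alarm_level: 'major', receiver: 'hduser05' },  // copied from alertObj.event[0]
//   overTime: { alarm_level: 'minor', receiver: 'hduser05', hours: 2 } // copied from alertObj.overtime[0]
// }]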