diff --git a/.gitignore b/.gitignore index f733c4b..397e3dd 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,23 @@ -config/ +config/* +!config/api_key.txt + +openmlsource/* +!openmlsource/readme.md + +OpenML/ +OpenML2/ +OpenML-backup/ + +openml.org/ +portml-website/ +portml-platform/ + +storage/datastore/* +storage/es/* +storage/mysql/* +storage/minio/* +portml-website + +docker-compose.yml +docker-compose-aws.yml +docker-compose-python.yml diff --git a/aws-readme.md b/aws-readme.md new file mode 100644 index 0000000..12981a6 --- /dev/null +++ b/aws-readme.md @@ -0,0 +1,84 @@ +# AWS instructions + +## Services used + +1. Elastic Container Registry + - Used to upload and store images of different containers +2. Elastic Container Service + - Running the containers as separate instances. Using Fargate clusters to run all container tasks in a single service +3. Elastic File System + - Running storage for shared volumes (MySQL, ES, config, and processing data) storage. +4. EC2 Load balancer + - Used to route traffic from PortML domain to correct instances on ECS +5. 
CloudWatch + - Logging for ECS containers + +## Compiling AWS images + + +## Uploading new images +Once per session connect docker to AWS ECR: +``` +aws ecr get-login-password --region [AWS_REGION] | docker login --username AWS --password-stdin [AWS_ACCOUNT_ID].dkr.ecr.[AWS_REGION].amazonaws.com +``` +Fill in the AWS region (twice) and the AWS account ID + +Next tag your images to push: +``` +docker tag [DOCKER_IMAGE_NAME] [AWS_ACCOUNT_ID].dkr.ecr.[AWS_REGION].amazonaws.com/[AWS_ECR_REPOSITORY_NAME] +``` +When successful push the image to AWS ECR: +``` +docker push [AWS_ACCOUNT_ID].dkr.ecr.[AWS_REGION].amazonaws.com/[AWS_ECR_REPOSITORY_NAME] +``` + +## Setting the task definitions on AWS ECS +Copy ``.reactenv`` file in ``[FLASK directory]`` and rename to ``.reactenv_aws`` and set all the settings according to the AWS settings to be used + +Instead of the default ``docker compose`` up command use the following chained commands to make use of the defined AWS compose file: ``docker-compose-aws.yml`` +``` +docker-compose rm -f ; docker-compose pull ; docker-compose -f docker-compose.yml -f docker-compose-aws.yml up --build +``` + +## Running config commands + +``` +aws ecs execute-command --region [AWS_REGION] --cluster fargate-cluster --task [TASKID] --container website --command "php index.php cron init_local_env" --interactive +``` + + +## Additional info +Extra information regarding AWS previously posted into Slack + +### To update loadbalancers on service: +``` +aws ecs update-service --service portml-dev-service --cluster fargate-cluster --cli-input-json file://D:\Work\PortML\load-v3.json +``` +jsonContent: +``` +{ + "loadBalancers" : + [ + { + "targetGroupArn": "arn:aws:elasticloadbalancing:eu-west-1:482280224187:targetgroup/portml-dev-website-new-tg/1771310010e11e59", + "containerName": "website_new", + "containerPort": 5000 + }, + { + "targetGroupArn": 
"arn:aws:elasticloadbalancing:eu-west-1:482280224187:targetgroup/portml-dev-elasticsearch-tg/1ff8abd71c2954a6", + "containerName": "elasticsearch", + "containerPort": 9200 + }, + { + "targetGroupArn": "arn:aws:elasticloadbalancing:eu-west-1:482280224187:targetgroup/portml-dev-website-tg/6171a39595c1fd72", + "containerName": "website", + "containerPort": 80 + } + ] +} +``` + +### Run PhP my admin for AWS (if port is open) +``` +docker run --name myadmin -d -e PMA_HOST=[IP of task] -p 8000:80 phpmyadmin +``` \ No newline at end of file diff --git a/config/api_key.txt b/config/api_key.txt new file mode 100644 index 0000000..b8daff4 --- /dev/null +++ b/config/api_key.txt @@ -0,0 +1 @@ +0123456789abcdef0123456789abcdef \ No newline at end of file diff --git a/docker-compose-aws-template.yml b/docker-compose-aws-template.yml new file mode 100644 index 0000000..e6564b7 --- /dev/null +++ b/docker-compose-aws-template.yml @@ -0,0 +1,47 @@ +# OpenML dev docker-compose +# For local dev environment setup +# *Insecure & Not for Production Setup* + +# Notes: +# issues: Rui Quintino,https://github.com/rquintino +# config folder is used to share some config keys after init between containers +# $ needs escaping to $$ in docker_compose file, $api_key is then $$api_key +# TODO: gollum not working, CORS issue, needs apache mapping, local gollum install on website container? 
+# OpenML main repo is mapped as volume, not copied within the built images, for enable development + +version: '2' + +services: + # MySQL dbs + mysql: + image: mysql:5.7.25 + container_name: mysql_test + platform: linux/x86_64 + ports: + # : < MySQL Port running inside container> + - '3307:3306' + build: + #Specify the build file + context: ./mysql + dockerfile: DockerfileAws + environment: + MYSQL_ROOT_PASSWORD: "password" + MYSQL_DATABASE: openml + volumes: + - ./storage/mysql:/storage + + website_new: + build: + context: ./ + dockerfile: ./website-new/DockerfileAws + args: + - sourceDirectory=openml.org + ports: + - "5000:5000" + links: + - mysql + depends_on: + - mysql + - elasticsearch + environment: + - PYTHONUNBUFFERED=1 diff --git a/docker-compose-template.yml b/docker-compose-template.yml new file mode 100644 index 0000000..b23bd0a --- /dev/null +++ b/docker-compose-template.yml @@ -0,0 +1,139 @@ +# OpenML dev docker-compose +# For local dev environment setup +# *Insecure & Not for Production Setup* + +# Notes: +# issues: Rui Quintino,https://github.com/rquintino +# config folder is used to share some config keys after init between containers +# $ needs escaping to $$ in docker_compose file, $api_key is then $$api_key +# TODO: gollum not working, CORS issue, needs apache mapping, local gollum install on website container? 
+# OpenML main repo is mapped as volume, not copied within the built images, for enable development + +version: '2' + +services: + # MySQL dbs + mysql: + image: mysql:5.7.25 + container_name: mysql_test + platform: linux/x86_64 + ports: + # : < MySQL Port running inside container> + - '3307:3306' + build: ./mysql + environment: + MYSQL_ROOT_PASSWORD: "root_password" + MYSQL_DATABASE: openml + volumes: + - ./storage/mysql:/storage + + #Flask + React website + website_new: + build: + context: ./ + dockerfile: ./website-new/Dockerfile + args: + - sourceDirectory=openml.org + ports: + - "5000:5000" + links: + - mysql + depends_on: + - mysql + - elasticsearch + environment: + - PYTHONUNBUFFERED=1 + - DATABASE_URI=mysql+pymysql://root:[fil-in PASSWORD]@[fill-in MYSQL_CONTAINER_NAME]:3306/[fill-in DATABASE_NAME] + - ELASTICSEARCH_SERVER=[ES_URL_SECURED] + - ELASTICSEARCH_CONTAINER_URL=[ES_URL] + - APP_SECRET_KEY=abcd + - JWT_SECRET_KEY=abcd + - SERVER_URL=http://website_new:5000/ + - REDIRECT_URL=http://website_new:5000 + - BACKEND_SERVER=http://website/api/v1/xml/ + - BACKEND_BASE_URL=http://website/ + - BACKEND_CONTAINER_URL=http://website/ + - SEND_EMAIL=False + - DASHBOARD_USE_DOCKER_CONTAINER_NAME=FALSE + - DASHBOARD_PHP_CONTAINER_NAME=website + - AUTH_URL_PREFIX=/secure + - REQUIRE_CONFIRMED_REGISTERING=False + - EXTERNAL_WEBISTE_URL=http://localhost:5000/ + + # Elastic search indexes + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:6.8.2 + build: ./elasticsearch + command: elasticsearch + ports: + - "9200:9200" + - "9300:9300" + volumes: + - ./storage/es:/esdata + + # Php website + website: + build: + context: ./ + dockerfile: ./website/Dockerfile + args: + - sourceDirectory=OpenML + ports: + - 80:80 + volumes: + - ./config:/openmlconfig + - ./storage/datastore:/datastore + depends_on: + - mysql + - elasticsearch + environment: + DB_NAME_OPENML: 'openml' + DB_HOST_OPENML: "mysql_test" + DB_USER_OPENML: 'username' + DB_PASS_OPENML: 
'user_password' + DB_NAME_EXPDB: 'openml_expdb' + DB_HOST_EXPDB: "mysql_test" + DB_USER_EXPDB_READ: 'username' + DB_PASS_EXPDB_READ: 'user_password' + DB_USER_EXPDB_WRITE: 'username' + DB_PASS_EXPDB_WRITE: 'user_password' + ES_URL: 'elasticsearch:9200' + ES_PUBLIC_URL: 'localhost:9200' + BASE_URL: 'http://localhost/' + DATA_PATH: '/datastore/' + DIRECTORY_PATH: '/var/www/html/' + API_USERNAME: 'fill in' + API_KEY: 'fill in' + + # MySQL admin UI (tool) + phpmyadmin: + image: phpmyadmin/phpmyadmin:latest + volumes: + - ./config:/openmlconfig + ports: + - 8080:80 + environment: + - PMA_HOST=mysql + depends_on: + - mysql + + # OpenML Java background workers + java: + build: ./java + image: amazoncorretto + volumes: + - ./config:/openmlconfig + - ./openmlsource/:/openmlsource + entrypoint: /entrypoint.sh + + minio: + build: ./minio + ports: + - "9000:9000" + - "9001:9001" + volumes: + - ./storage/minio:/data + environment: + MINIO_ROOT_USER: FILL-IN + MINIO_ROOT_PASSWORD: FILL-IN + command: server --console-address ":9001" /data diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 3dacf2d..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,73 +0,0 @@ -# OpenML dev docker-compose -# For local dev environment setup -# *Insecure & Not for Production Setup* - -# Notes: -# issues: Rui Quintino,https://github.com/rquintino -# config folder is used to share some config keys after init between containers -# $ needs escaping to $$ in docker_compose file, $api_key is then $$api_key -# TODO: gollum not working, CORS issue, needs apache mapping, local gollum install on website container? 
-# OpenML main repo is mapped as volume, not copied within the built images, for enable development - -version: '2' - -services: - # MySQL dbs - mysql: - image: mysql:5.7.25 - command: mysqld --default-authentication-plugin=mysql_native_password - build: ./mysql - environment: - MYSQL_ROOT_PASSWORD: "" - MYSQL_DATABASE: openml - - - # Elastic search indexes - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:6.8.2 - build: ./elasticsearch - command: elasticsearch - ports: - - "9200:9200" - - "9300:9300" - - # Php frontend - website: - build: ./website - ports: - - 80:80 - volumes: - - ./config:/openmlconfig - - ./OpenML:/var/www/html - depends_on: - - mysql - - elasticsearch - - gollum - - # MySQL admin UI (tool) - phpmyadmin: - image: phpmyadmin/phpmyadmin:latest - volumes: - - ./config:/openmlconfig - ports: - - 8080:80 - environment: - - PMA_HOST=mysql - depends_on: - - mysql - - # OpenML Java background workers - java: - image: java - volumes: - - ./config:/openmlconfig - - ./OpenML:/var/www/html - - # runs openml process_dataset java background task each 10 secs - command: /bin/bash -c 'while sleep 10;do api_key=`cat /openmlconfig/api_key.txt`; echo "API key:$$api_key"; java -Xmx4G -jar /var/www/html/openml_OS/third_party/OpenML/Java/evaluate.jar -config "server = http://website/;api_key = $$api_key" -f process_dataset;done' - - # OpenML Wiki, TODO: not working , CORS/cross domain issue - gollum: - build: ./gollum - ports: - - 9080:80 diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile index 9169c69..c30792e 100644 --- a/elasticsearch/Dockerfile +++ b/elasticsearch/Dockerfile @@ -1,4 +1,4 @@ -FROM elasticsearch +FROM elasticsearch:6.8.2 LABEL maintainer="Rui Quintino,https://github.com/rquintino" COPY ./elasticsearch.yml /usr/share/elasticsearch/config/ \ No newline at end of file diff --git a/elasticsearch/elasticsearch.yml b/elasticsearch/elasticsearch.yml index 7b8be73..a77f49d 100644 --- a/elasticsearch/elasticsearch.yml 
+++ b/elasticsearch/elasticsearch.yml @@ -8,4 +8,7 @@ http.host: 0.0.0.0 http.cors.enabled: true http.cors.allow-origin: "*" http.cors.allow-methods: OPTIONS, HEAD, GET, POST, PUT, DELETE -http.cors.allow-headers: "X-Requested-With,X-Auth-Token,Content-Type, Content-Length, Authorization" \ No newline at end of file +http.cors.allow-headers: X-Requested-With,X-Auth-Token,Content-Type,Content-Length,Authorization + +#set data path +path.data: /esdata diff --git a/images/compose-mysql-config.PNG b/images/compose-mysql-config.PNG new file mode 100644 index 0000000..b72e696 Binary files /dev/null and b/images/compose-mysql-config.PNG differ diff --git a/images/compose-php-config.PNG b/images/compose-php-config.PNG new file mode 100644 index 0000000..25f7392 Binary files /dev/null and b/images/compose-php-config.PNG differ diff --git a/images/storage-directory.PNG b/images/storage-directory.PNG new file mode 100644 index 0000000..5aad74e Binary files /dev/null and b/images/storage-directory.PNG differ diff --git a/java/Dockerfile b/java/Dockerfile new file mode 100644 index 0000000..0de641b --- /dev/null +++ b/java/Dockerfile @@ -0,0 +1,4 @@ +FROM amazoncorretto + +COPY ./entrypoint.sh /entrypoint.sh +COPY ./entrypoint-aws.sh /entrypoint-aws.sh \ No newline at end of file diff --git a/java/entrypoint-aws.sh b/java/entrypoint-aws.sh new file mode 100644 index 0000000..e2ee218 --- /dev/null +++ b/java/entrypoint-aws.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +while sleep 10; + do + api_key=`cat /openmlconfig/api_key.txt`; + echo "API key:$api_key"; + java -Xmx4G -jar /openmlsource/Java/evaluate.jar -config "server = http://localhost/;api_key = $api_key" -f process_dataset; + java -Xmx4G -jar /openmlsource/Java/evaluate.jar -config "server = http://localhost/;api_key = $api_key" -f evaluate_run; + done \ No newline at end of file diff --git a/java/entrypoint.sh b/java/entrypoint.sh new file mode 100644 index 0000000..96c3d3d --- /dev/null +++ b/java/entrypoint.sh @@ -0,0 +1,9 @@ 
+#!/bin/bash + +while sleep 10; + do + api_key=`cat /openmlconfig/api_key.txt`; + echo "API key:$api_key"; + java -Xmx4G -jar /openmlsource/Java/evaluate.jar -config "server = http://website/;api_key = $api_key" -f process_dataset; + java -Xmx4G -jar /openmlsource/Java/evaluate.jar -config "server = http://website/;api_key = $api_key" -f evaluate_run; + done \ No newline at end of file diff --git a/minio-process/Dockerfile b/minio-process/Dockerfile new file mode 100644 index 0000000..7eb552d --- /dev/null +++ b/minio-process/Dockerfile @@ -0,0 +1,17 @@ +FROM python:3.7-slim + +# Install python dependecies +RUN apt-get -y update && apt-get install +RUN apt-get install -y python3-pip python3-dev + +# Copy requirements file to Docker +COPY ./requirements.txt /requirements.txt + +# Install Python libraries from requirements file +RUN pip3 install -r requirements.txt + +# Copy entrypoint file +COPY ./entrypoint.sh /entrypoint.sh + +#Copt files +COPY ./upload-to-minio.py /upload-to-minio.py \ No newline at end of file diff --git a/minio-process/entrypoint.sh b/minio-process/entrypoint.sh new file mode 100644 index 0000000..fe46ba6 --- /dev/null +++ b/minio-process/entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +while sleep 10; + do + python upload-to-minio.py + done \ No newline at end of file diff --git a/minio-process/requirements.txt b/minio-process/requirements.txt new file mode 100644 index 0000000..c6b95cd --- /dev/null +++ b/minio-process/requirements.txt @@ -0,0 +1,2 @@ +minio +pandas \ No newline at end of file diff --git a/minio-process/upload-to-minio.py b/minio-process/upload-to-minio.py new file mode 100644 index 0000000..94aa3a1 --- /dev/null +++ b/minio-process/upload-to-minio.py @@ -0,0 +1,60 @@ +# import minio +# import glob +# import os +# import json +# from minio import Minio +# import urllib3 +# from minio.error import S3Error +# import pandas as pd + +import os + +from minio.error import S3Error + +from minio import Minio + 
+print(os.getenv('MINIO_SERVER_NAME')) +# import pandas as pd +# import json + +try: + minio_client = Minio( + "172.19.0.6:9000", + access_key=os.getenv('MINIO_ACCESS_KEY'), + secret_key=os.getenv('MINIO_SECRET_KEY'), + secure=False + ) + + buckets = minio_client.list_buckets() + for bucket in buckets: + print(bucket.name, bucket.creation_date) +except S3Error as error: + print("Error occured. ", error) + +# df = pd.read_csv(f"") +# df.to_parquet(f"") +# client.make_bucket(f"") +# client.fput_object( +# f"dataset.pq", f"/dataset.pq" +# ) +# policy_read_only = { +# "Version": "2012-10-17", +# "Statement": [ +# { +# "Effect": "Allow", +# "Principal": {"AWS": "*"}, +# "Action": ["s3:GetBucketLocation", "s3:ListBucket"], +# "Resource": f"arn:aws:s3:::dataset", +# }, +# { +# "Effect": "Allow", +# "Principal": {"AWS": "*"}, +# "Action": "s3:GetObject", +# f"Resource": f"arn:aws:s3:::dataset/*", +# }, +# ], +# } + +# client.set_bucket_policy(f'dataset{}', json.dumps(policy_read_only)) +# print(f'dataset uploaded') +print("Test") diff --git a/minio/Dockerfile b/minio/Dockerfile new file mode 100644 index 0000000..1233a73 --- /dev/null +++ b/minio/Dockerfile @@ -0,0 +1,3 @@ +FROM minio/minio:edge + +# CMD ["minio", "server", "/data"] \ No newline at end of file diff --git a/mysql/Dockerfile b/mysql/Dockerfile index 7a7a4f3..947f2de 100644 --- a/mysql/Dockerfile +++ b/mysql/Dockerfile @@ -1,7 +1,18 @@ -FROM mysql +FROM mysql:5.7.25 LABEL maintainer="Rui Quintino,https://github.com/rquintino" # *.sql files in /docker-entrypoint-initdb.d are executed at mysql instance start, by alphabetical order ADD ./openml_expdb.sql /docker-entrypoint-initdb.d ADD ./openml.sql /docker-entrypoint-initdb.d +COPY ./custom-mysql.cnf /etc/mysql/conf.d/ +COPY ./portml-mysql.cnf /etc/mysql/my.cnf + +# RUN chown -R mysql:mysql /storage + +# RUN mkdir -p /storage +# RUN chmod -R 777 /storage +# RUN groupadd -r -g 6000 mysqlgroup +# RUN useradd -r -u 6000 -g 6000 mysqluser +# USER mysqluser +# VOLUME 
["/storage"] \ No newline at end of file diff --git a/mysql/DockerfileAws b/mysql/DockerfileAws new file mode 100644 index 0000000..a2e225b --- /dev/null +++ b/mysql/DockerfileAws @@ -0,0 +1,16 @@ + +FROM mysql:5.7.25 +LABEL maintainer="Rui Quintino,https://github.com/rquintino" + +# *.sql files in /docker-entrypoint-initdb.d are executed at mysql instance start, by alphabetical order +ADD ./openml_expdb.sql /docker-entrypoint-initdb.d +ADD ./openml.sql /docker-entrypoint-initdb.d +COPY ./custom-mysql.cnf /etc/mysql/conf.d/ +COPY ./portml-mysql.cnf /etc/mysql/my.cnf + +RUN mkdir -p /storage +RUN chmod -R 777 /storage +RUN groupadd -r -g 6000 mysqlgroup +RUN useradd -r -u 6000 -g 6000 mysqluser +USER mysqluser +# VOLUME ["/storage"] \ No newline at end of file diff --git a/mysql/custom-mysql.cnf b/mysql/custom-mysql.cnf new file mode 100644 index 0000000..dd829be --- /dev/null +++ b/mysql/custom-mysql.cnf @@ -0,0 +1,2 @@ +[mysqld] +default-authentication-plugin = mysql_native_password \ No newline at end of file diff --git a/mysql/openml_expdb.sql b/mysql/openml_expdb.sql index 657b12a..6aadb7c 100644 --- a/mysql/openml_expdb.sql +++ b/mysql/openml_expdb.sql @@ -95,7 +95,6 @@ CREATE TABLE `dataset` ( `name` varchar(128) COLLATE utf8_unicode_ci NOT NULL, `version` varchar(64) COLLATE utf8_unicode_ci NOT NULL, `version_label` varchar(128) COLLATE utf8_unicode_ci DEFAULT NULL, - `description` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, `format` varchar(64) COLLATE utf8_unicode_ci NOT NULL DEFAULT 'arff', `creator` text COLLATE utf8_unicode_ci, `contributor` text COLLATE utf8_unicode_ci, @@ -106,6 +105,7 @@ CREATE TABLE `dataset` ( `citation` text COLLATE utf8_unicode_ci, `collection` varchar(64) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `url` mediumtext CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `parquet_url` mediumtext CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `isOriginal` enum('true','false') CHARACTER SET utf8 COLLATE utf8_bin DEFAULT 
NULL, `file_id` int(10) DEFAULT NULL, `default_target_attribute` varchar(1024) COLLATE utf8_unicode_ci DEFAULT NULL, @@ -147,6 +147,33 @@ CREATE TABLE `dataset_tag` ( -- -------------------------------------------------------- + +-- +-- Table structure for table `dataset_description` +-- + +CREATE TABLE `dataset_description` ( + `did` int(10) UNSIGNED NOT NULL, + `version` int(10) UNSIGNED NOT NULL, + `description` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, + `uploader` mediumint(8) UNSIGNED NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; + +-- -------------------------------------------------------- + + +-- Table structure for table `dataset_topic` +-- + +CREATE TABLE `dataset_topic` ( + `id` int(10) UNSIGNED NOT NULL, + `topic` varchar(255) NOT NULL, + `uploader` mediumint(8) UNSIGNED NOT NULL, + `date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- -------------------------------------------------------- + -- -- Table structure for table `data_feature` -- @@ -183,7 +210,7 @@ CREATE TABLE `data_feature` ( CREATE TABLE `data_feature_value` ( `did` int(10) UNSIGNED NOT NULL, `index` int(10) UNSIGNED NOT NULL, - `value` varchar(256) NOT NULL + `value` varchar(256) NOT NULL COLLATE latin1_general_cs ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -- -------------------------------------------------------- @@ -897,6 +924,20 @@ ALTER TABLE `dataset_tag` ADD PRIMARY KEY (`id`,`tag`), ADD KEY `uploader` (`uploader`); +-- +-- Indexes for table `dataset_description` +-- +ALTER TABLE `dataset_description` + ADD PRIMARY KEY (`did`,`version`), + ADD KEY `uploader` (`uploader`); + +-- +-- Indexes for table `dataset_topic` +-- +ALTER TABLE `dataset_topic` + ADD PRIMARY KEY (`id`,`topic`), + ADD KEY `uploader` (`uploader`); + -- -- Indexes for table `data_feature` -- @@ -909,7 +950,8 @@ ALTER TABLE `data_feature` -- Indexes for table `data_feature_value` -- ALTER TABLE `data_feature_value` - ADD KEY `did` 
(`did`,`index`); + ADD KEY `did` (`did`,`index`), + ADD UNIQUE(`did`, `index`, `value`); -- -- Indexes for table `data_processed` @@ -1353,6 +1395,13 @@ ALTER TABLE `dataset_status` ADD CONSTRAINT `dataset_status_ibfk_1` FOREIGN KEY (`did`) REFERENCES `dataset` (`did`) ON DELETE CASCADE, ADD CONSTRAINT `dataset_status_ibfk_2` FOREIGN KEY (`user_id`) REFERENCES `openml`.`users` (`id`); +-- +-- Constraints for table `dataset_description` +-- +ALTER TABLE `dataset_description` + ADD CONSTRAINT `dataset_description_ibfk_1` FOREIGN KEY (`did`) REFERENCES `dataset` (`did`) ON DELETE CASCADE, + ADD CONSTRAINT `dataset_description_ibfk_2` FOREIGN KEY (`uploader`) REFERENCES `openml`.`users` (`id`); + -- -- Constraints for table `dataset_tag` -- @@ -1360,6 +1409,13 @@ ALTER TABLE `dataset_tag` ADD CONSTRAINT `dataset_tag_ibfk_1` FOREIGN KEY (`uploader`) REFERENCES `openml`.`users` (`id`) ON DELETE CASCADE ON UPDATE CASCADE, ADD CONSTRAINT `fk_dataset_tag` FOREIGN KEY (`id`) REFERENCES `dataset` (`did`) ON DELETE CASCADE; +-- +-- Constraints for table `dataset_topic` +-- +ALTER TABLE `dataset_topic` + ADD CONSTRAINT `dataset_topic_ibfk_1` FOREIGN KEY (`uploader`) REFERENCES `openml`.`users` (`id`) ON DELETE CASCADE ON UPDATE CASCADE, + ADD CONSTRAINT `fk_dataset_topic` FOREIGN KEY (`id`) REFERENCES `dataset` (`did`) ON DELETE CASCADE; + -- -- Constraints for table `data_feature` -- diff --git a/mysql/portml-mysql.cnf b/mysql/portml-mysql.cnf new file mode 100644 index 0000000..b225593 --- /dev/null +++ b/mysql/portml-mysql.cnf @@ -0,0 +1,3 @@ +[mysqld] +default-authentication-plugin = mysql_native_password +datadir = /storage \ No newline at end of file diff --git a/openmlsource/readme.md b/openmlsource/readme.md new file mode 100644 index 0000000..68cc69b --- /dev/null +++ b/openmlsource/readme.md @@ -0,0 +1 @@ +## For evaluation engine \ No newline at end of file diff --git a/readme.md b/readme.md index 95aac13..ef2869d 100644 --- a/readme.md +++ b/readme.md @@ -6,7 +6,6 
@@ # Issues/limitations - current only works on port 80 - wiki powered by gollum is not working (in the new OpenML frontend this will no longer be used). -- **Only the local admin account works for now.** There is an issue with login and it will not be possible to create and login with other accounts (Work in progress) # Requirements @@ -21,63 +20,130 @@ For Linux: docker commands assume you can use docker without sudo (your user is ### Step 1: Clone the repos +First the Docker repo with the general structure and database files is cloned. Once this repo is cloned the content repos for the PHP website and Flask website are cloned in subfolders of this Docker repo. Multiple source folders for different versions of the websites are supported. This allows you to quickly change between two versions of the same website. -Note that we clone docker_changes branch of OpenML repo. + +#### 1. Docker repo +Clone and change into the Docker directory with the following commands ``` -git clone https://github.com/openml/openml-docker-dev.git +git clone -b [Select correct branch] https://github.com/openml/openml-docker-dev.git [Optional the name of the new directory (openml-docker-dev)] -cd openml-docker-dev +cd [name of directory] +``` -git clone -b docker_changes https://github.com/openml/OpenML.git +#### 2. PHP website repo +Clone the OpenML repo into the Docker directory. Select the "docker_changes" branch or any other branch that supports Docker integration +``` +git clone -b docker_changes https://github.com/openml/OpenML.git [PHP directory name (OpenML)] ``` ![](images/2018-04-07-00-57-29.png) -### Step 2: Configure docker and OpenML +#### 3. 
Flask website -Edit *docker-compose.yml* mainly define a secure **mysql password**: +Clone the new website from here (also inside Docker directory), keep track of the folder name +- Branch name TBD -**(leaving the default will make docker-compose fail)** +``` +git clone -b docker https://github.com/OpenML/openml.org.git [Flask directory name (openml.org)] +``` -![](images/passwordsql.PNG) +* for PortML clone with +``` +git clone -b docker https://github.com/PortML/openml.org-docker.git [Flask directory name (openml.org)] +``` + +### Step 2: Set structure for storage folder +The Docker configuration uses host volumes to store data. This allows persistent data over restarts of the docker instances. In order to set this up the following folder structure must be present in the ``storage`` folder in the main Docker directory. + +![](images/storage-directory.PNG) +Also copy the content of ``[PHP website directory]/data`` into ``storage/datastore`` this is used to initiate demo data -Copy *OpenML\openml_OS\config\BASE_CONFIG-BLANK.php* to *OpenML\openml_OS\config\BASE_CONFIG.php* +Note that deleting content in the ``es`` or ``mysql`` folder will delete the ElasticSearch and MySQL database respectively. The initialization scrips need to be run afterwards to init the demo data. Deleting one and not the other will likely result in data not correctly be referenced in the website. -Check & change *BASE_CONFIG.php* as appropriate: +### Step 3: Configure Docker and the websites +Most backend configurations can be set in the ``docker-compose.yml`` file. Front-end configurations for the React app needs to be defined in a separate environment file as these are compiled without the use of docker environment variables. -Define BASE_URL as localhost: +1. Copy ``docker-compose-template.yml`` and rename it to ``docker-compose.yml`` -![](images/2018-04-07-01-01-52.png) +2. 
Edit MySQL configuration in ``docker-compose.yml`` mainly define a secure **mysql password**, optional change the database name: -Define path and data path. In the docker compose, note that /var/www/html is mapped to ./OpenML. + **(leaving the default will make docker-compose fail)** -![](images/2018-04-07-01-02-07.png) + ![](images/compose-mysql-config.PNG) -![](images/datapath.PNG) -Configure details for the experiment database. +3. Copy ``[PHP directory]\openml_OS\config\BASE_CONFIG-BLANK-ENV.php`` to ``[PHP directory]\openml_OS\config\BASE_CONFIG.php`` -![](images/pass1.PNG) +4. Edit the ``website`` environment variables in ``docker-compose.yml``: -Configure details for the OpenML database. + ![](images/compose-php-config.PNG) -![](images/pass2.PNG) + Change the usernames and passwords fields, (bby default you can use ``root`` as username and the password set in step 2) + + Other variables can be changed as well: + - The ``HOST`` variables reference to the location of the MySQL database, for docker this is the container name of the MySQL image (mysql_test) + - The ``ES_URL``s reference to the location of the ElasticSearch instance + - Note that changing the ``DATA_PATH`` variable also requires you to change the volume definition for the datastore in the same compose file + - It is not advised to change the ``DIRECTORY_PATH`` vairable as this also requires you to edit the ``Dockerfile`` in the ``website`` directory to reflect these changes so the source files are copied to the correct location inside the Docker image -Configure elastic search. +5. Optional non environmental changes in the ``[PHP directory]\openml_OS\config\BASE_CONFIG.php`` file -![](images/2018-04-07-01-03-52.png) + ![](images/2018-04-07-01-04-02.png) -![](images/2018-04-07-01-04-02.png) +6. Disable email activation in ``[PHP directory]OpenML/openml_OS/config/ion_auth.php`` -Disable email activation in *OpenML\openml_OS\config\ion_auth.php* +7. Flask configurations + 1. 
Copy and rename ``.flaskenv_TEMPLATE`` to ``.flaskenv`` in the ``[Flask website directory]`` + 2. Copy and rename ``TEMPLATE.reactenv`` to ``.reactenv`` in the ``[Flask website directory]`` or rename to ``.reactenv_aws`` for AWS deployment. + 3. Edit DATABASE_URI field in ``.flaskenv`` to add in the mysql password in place of PASSWORD- + (use the same password as the mysql password in docker-compose.yml) + 4. Fill in the following environmental variables in the ``docker compose.yml`` file under the ``website_new`` service. These variables will overwrite the variables in ``.flaskenv`` but this is kept in place for backwards compatibility: + 1. DATABASE_URI=mysql+pymysql://root:[fil-in PASSWORD]@[fill-in MYSQL_CONTAINER_NAME]:3306/[fill-in DATABASE_NAME] + - Fill in the PASSWORD, MYSQL_CONTAINER_NAME (``mysql_test``), and DATABASE_NAME (``openml``) + 2. ELASTICSEARCH_SERVER=[ES_URL] , default is ``http://elasticsearch:9200/`` + 3. APP_SECRET_KEY and JWT_SECRET_KEY + 4. SERVER_URL, fill in container name of new website and the port (``http://website_new:5000/``) + 5. REDIRECT_URL, to new website (``http://website_new:5000``) + 6. BACKEND_SERVER, link to PHP backend server to set Python API server (``http://website/api/v1/xml/``) + 7. SEND_EMAIL, used for disabling sending emails in development (currently set to ``False`` as configuration is not yet tested with sending emails using the Docker setup) + 8. DASHBOARD_USE_DOCKER_CONTAINER_NAME, when using Docker downloading datasets from the dashboard code is not working as the container name should be used. Set this to true to use the DASHBOARD_PHP_CONTAINER_NAME + 9. DASHBOARD_PHP_CONTINER_NAME, name of the older backend container used for dashboard code when running Docker + 4. Note on DATABASE_URI: hostname should be 'mysql_test' (by default), the container name of database: + ``DATABASE_URI=mysql+pymysql://[username]:[password]@mysql_test:3306/openml`` + 5. 
Update the variables in the ``.reactenv`` file according to the deployment. The template file already is configured to work with localhost and the ports the containers are accessible to (80: PHP; 5000; Flask; 9200: ES) + 6. In order to enable the python debug prints in docker add the following lines of code to the ``docker-compose.yml`` file inside the ``website_new`` service + ```` + environment: + - PYTHONUNBUFFERED=1 + ```` + - Please make sure you rebuild the ``[Flask website directory]`` docker image if you make any changes to these configuration files using: + This will make sure the react image is re-built (Don't do this before the first docker-compose up): + ``` + cd [Flask website directory] + docker build -t openml-docker -f Dockerfile . + ``` + - [Optionally] you can use a hot-reload configuration for the new website with some constrains, see the section below how to set this up -![](images/2018-04-07-01-07-21.png) +8. Copy evaluation engine + - Copy the ``Java`` folder in ``[PHP website directory]/openml_OS/third_party/OpenML`` and copy it to ``openmlsource`` inside the Docker root folder. The Java background worker is configured to get the evaluation engine from this location. [Optional] a compiled JAR file from a newer version of the EvaluationEngine can be placed here inside a ``Java`` folder -### Step 3: Starting docker-compose + + +### Step 4: Starting docker-compose + +Make sure docker is running. + +On the Docker folder, where ``docker-compose.yml`` is located run: ``` docker pull docker.elastic.co/elasticsearch/elasticsearch:6.8.2 @@ -91,7 +157,20 @@ after start wait a few seconds for services to be ready, ex: MySQL ready for con ![](images/2018-04-07-01-12-43.png) -### Step 4 Check phpmyadmin at http://localhost:8080/ +#### Development restarts +During development the Docker image needs to be recreated multiple times. 
The following commands ensure that old images are removed and a new image is recreated: +``` +docker-compose rm -f +docker-compose pull +docker-compose up --build +``` + +Or chain it: +``` +docker-compose rm -f ; docker-compose pull ; docker-compose up --build +``` + +### Step 5: Check phpmyadmin at http://localhost:8080/ ![](images/2018-04-07-01-13-38.png) @@ -100,14 +179,17 @@ after start wait a few seconds for services to be ready, ex: MySQL ready for con ![](images/2018-04-07-01-14-02.png) -### Step 5: Init dbs, admin user & elastic search indexes +### Step 6: Init dbs, admin user & elastic search indexes +The first command only needs to be run the first time, or every time when the persistent storage is disabled (not advised as configuration changes are required for this) Execute in a new window/shell: ``` -docker exec -it openml-docker-dev_website_1 php index.php cron init_local_env +docker exec -it openml-docker-dev-website-1 php index.php cron init_local_env ``` +The username and API key of the admin is displayed. Copy these and fill them in in the ``docker-compose.yml`` file in the ``website`` service under ``environment`` and then the ``API_USERNAME`` and ``API_KEY`` + (take note the printed admin username and password, and wait to finish, can take 1-2mins) ![](images/localdb.PNG) @@ -122,8 +204,7 @@ docker exec -it openml-docker-dev_website_1 chown -R www-data:www-data /var/www/ ``` - -### Step 6: Final tests +### Step 6: Final tests (Old website) Login on http://localhost with admin and saved password @@ -145,3 +226,52 @@ We have 1 sample dataset ![](images/upload2.PNG) ### Note: Files in OpenML cloned repo are mounted inside the website container, any change will reflect immediately on the site + +### Step 7 New website checks +- Check the new website running at localhost:5000. It should look similar to new.openml.org +- Sign up as a new user in the new website. 
(Note that you cannot use the admin account from the old website to login here) +- Sign in with your email and password +- You should be able to see your profile +- [Optional] By default the user created above is not an admin. This is required if you want to use the dataset upload. This can be done by logging into phpMyAdmin and changing the 'users_groups' row of this user. Set the 'group_id' to 1 (admin group) and save. This is not always needed, only when a user is created before the init commands are run +- Check Dataset upload (required to fill in all fields) + +### Step 8 Building images for remote deployment +The PortML version of the docker image also includes configurations for remote deployment (AWS). Backend images (Flask and PHP) can use environment variables which can be set in the ``docker-compose.yml`` file and in the remote hosting environment. However, frontend (React) and database (MySQL) configurations need pre-built images specifically for the deployment. For this case a second compose file is created that targets these changes. In order to create an image for AWS run the following command: +``` +docker-compose -f docker-compose.yml -f docker-compose-aws.yml up -d --build +``` + +If small changes do not trigger an update when pushing to AWS use the following command(s) to build from scratch: + +``` +docker-compose rm -f ; docker-compose pull ; docker-compose -f docker-compose.yml -f docker-compose-aws.yml up -d --build +``` + +The environment settings in the docker files are not used when running the images on AWS; all environment variables therefore need to be set in the AWS task definition per container configuration + +### [Optional hot-reload new website] +Using a hot-reload in docker requires you to set the volume of the source code to your local folder of the new website. Flask runs in development mode and will see changes you make without requiring you to rebuild the image. 
The only downside is that you are unable to reach the new front-end (React code) via the docker URL. You can separately run a node development server for the front-end to also enable hot-reload for the React front-end. + +Add the following lines of code to the main 'docker-compose.yml' file inside the 'website_new' service to enable this hot-reload function: +```` +volumes: + - ./openml.org:/app +```` + +## Troubleshoot +If the new website rebuild doesn't work, try clearing all caches with: +``` +docker system prune -a +``` +and then pull Elasticsearch before doing docker-compose up. + + +### Java error +On Windows, by default the entry scripts use the Windows line ending (CRLF); this cannot be committed using git, but needs to be changed to ``LF``. + +### Cannot log in with phpMyAdmin +Stop all containers. Try to delete the content of the ``mysql`` and ``es`` folders in the ``storage`` directory. Then restart everything cleanly (removing the previous images) + +## ToDos +- [DONE] Include Flask environmental variables in the docker-compose file +- Improve DockerFile for website-new to copy flask and react part separately to take advantage of the caching of Docker diff --git a/storage/readme.md b/storage/readme.md new file mode 100644 index 0000000..80806a2 --- /dev/null +++ b/storage/readme.md @@ -0,0 +1,14 @@ +Storage folder for all data storage. +Create the following folders: + +datastore: +Data folder for PHP backend, note that this needs to be set in the env settings of the "website" container. 
Try to copy the data folder of OpenML in this folder to get the same data folder structure and include SQL files to initiate the DB + +es: +ElasticSearch data folder + +mysql: +MySQL data folder + +minio: +MinIO data folder \ No newline at end of file diff --git a/website-new/.dockerignore b/website-new/.dockerignore new file mode 100644 index 0000000..ca57620 --- /dev/null +++ b/website-new/.dockerignore @@ -0,0 +1,2 @@ +**/node_modules +**/build \ No newline at end of file diff --git a/website-new/Dockerfile b/website-new/Dockerfile new file mode 100644 index 0000000..ee9f528 --- /dev/null +++ b/website-new/Dockerfile @@ -0,0 +1,39 @@ +FROM python:3.7-slim + +# Set argument to make source directory variable +ARG sourceDirectory=openml.org + +RUN apt-get -y update && apt-get install -y libzbar-dev sudo +RUN apt-get install -y python3-pip python3-dev +RUN apt-get install -y nodejs npm +EXPOSE 5000 +## We copy just the requirements.txt first to leverage Docker cache +COPY ./${sourceDirectory}/requirements.txt /app/requirements.txt +WORKDIR /app +RUN pip3 install -r requirements.txt +RUN pip3 install python-dotenv + +#copy entire openml.org folder to app folder in image +COPY ./${sourceDirectory} /app + +#Copy env file for correct development +COPY ./${sourceDirectory}/.reactenv /app/.env +COPY ./${sourceDirectory}/.reactenv /app/server/src/client/app/.env + +#Install NPM dependencies and build +WORKDIR /app/server/src/client/app +RUN npm install --legacy-peer-deps +# RUN npm run build +WORKDIR /app +RUN npm install --legacy-peer-deps +RUN npm run build --prefix server/src/client/app/ +CMD ["flask", "run", "--host", "0.0.0.0"] + +# RUN mkdir -p /openmlconfig +# COPY ./${sourceDirectory}/api_key.txt /openmlconfig/api_key.txt + +# "--host", "0.0.0.0" + +# docker build -t openml-docker -f Dockerfile . 
+# docker run -it -p 5000:5000 --name sahi-openml openml-docker +#sqlite:///openml.db \ No newline at end of file diff --git a/website-new/DockerfileAws b/website-new/DockerfileAws new file mode 100644 index 0000000..353da3a --- /dev/null +++ b/website-new/DockerfileAws @@ -0,0 +1,39 @@ +FROM python:3.7-slim + +# Set argument to make source directory variable +ARG sourceDirectory=openml.org + +RUN apt-get -y update && apt-get install -y libzbar-dev sudo +RUN apt-get install -y python3-pip python3-dev +RUN apt-get install -y nodejs npm +EXPOSE 5000 +## We copy just the requirements.txt first to leverage Docker cache +COPY ./${sourceDirectory}/requirements.txt /app/requirements.txt +WORKDIR /app +RUN pip3 install -r requirements.txt +RUN pip3 install python-dotenv + +#copy entire openml.org folder to app folder in image +COPY ./${sourceDirectory} /app + +#Copy env file for correct development +COPY ./${sourceDirectory}/.reactenv_aws /app/.env +COPY ./${sourceDirectory}/.reactenv_aws /app/server/src/client/app/.env + +#Install NPM dependencies and build +WORKDIR /app/server/src/client/app +RUN npm install --legacy-peer-deps +# RUN npm run build +WORKDIR /app +RUN npm install --legacy-peer-deps +RUN npm run build --prefix server/src/client/app/ +CMD ["flask", "run", "--host", "0.0.0.0"] + +# RUN mkdir -p /openmlconfig +# COPY ./${sourceDirectory}/api_key.txt /openmlconfig/api_key.txt + +# "--host", "0.0.0.0" + +# docker build -t openml-docker -f Dockerfile . 
+# docker run -it -p 5000:5000 --name sahi-openml openml-docker +#sqlite:///openml.db diff --git a/website-new/DockerfilePython b/website-new/DockerfilePython new file mode 100644 index 0000000..abc6e11 --- /dev/null +++ b/website-new/DockerfilePython @@ -0,0 +1,39 @@ +FROM python:3.7-slim + +# Set argument to make source directory variable +ARG sourceDirectory=openml.org + +RUN apt-get -y update && apt-get install -y libzbar-dev sudo +RUN apt-get install -y python3-pip python3-dev +RUN apt-get install -y nodejs npm +EXPOSE 5000 +## We copy just the requirements.txt first to leverage Docker cache +COPY ./${sourceDirectory}/requirements.txt /app/requirements.txt +WORKDIR /app +RUN pip3 install -r requirements.txt +RUN pip3 install python-dotenv + +#copy entire openml.org folder to app folder in image +# COPY ./${sourceDirectory} /app + +#Copy env file for correct development +COPY ./${sourceDirectory}/.reactenv_aws /app/.env +COPY ./${sourceDirectory}/.reactenv_aws /app/server/src/client/app/.env + +#Install NPM dependencies and build +WORKDIR /app/server/src/client/app +RUN npm install --legacy-peer-deps +# RUN npm run build +WORKDIR /app +RUN npm install --legacy-peer-deps +# RUN npm run build --prefix server/src/client/app/ +CMD ["flask", "run", "--host", "0.0.0.0"] + +# RUN mkdir -p /openmlconfig +# COPY ./${sourceDirectory}/api_key.txt /openmlconfig/api_key.txt + +# "--host", "0.0.0.0" + +# docker build -t openml-docker -f Dockerfile . +# docker run -it -p 5000:5000 --name sahi-openml openml-docker +#sqlite:///openml.db diff --git a/website/Dockerfile b/website/Dockerfile index 42f95f5..df97110 100644 --- a/website/Dockerfile +++ b/website/Dockerfile @@ -1,20 +1,23 @@ FROM php:7.0-apache LABEL maintainer="Rui Quintino,https://github.com/rquintino" +# Set argument to make source directory variable +ARG sourceDirectory=OpenML + # Install PHP extensions and PECL modules. 
RUN buildDeps=" \ - libbz2-dev \ - default-libmysqlclient-dev \ + libbz2-dev \ + default-libmysqlclient-dev \ " \ runtimeDeps=" \ - curl \ - git \ - libfreetype6-dev \ - libicu-dev \ - libjpeg-dev \ - libmcrypt-dev \ - libpng-dev \ - libpq-dev \ + curl \ + git \ + libfreetype6-dev \ + libicu-dev \ + libjpeg-dev \ + libmcrypt-dev \ + libpng-dev \ + libpq-dev \ " \ && apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y $buildDeps $runtimeDeps \ && docker-php-ext-install bz2 calendar iconv intl mbstring mcrypt mysqli pdo_mysql pdo_pgsql pgsql zip \ @@ -30,10 +33,41 @@ ENV COMPOSER_HOME /root/composer RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer ENV PATH $COMPOSER_HOME/vendor/bin:$PATH +# Install Java +# Install Java for Debian 9.6 +RUN mkdir -p /usr/share/man/man1 +RUN echo 'deb http://deb.debian.org/debian stretch-backports main' > /etc/apt/sources.list.d/backports.list +RUN apt-get update +# RUN apt-get install -y ca-certificates-java --allow-unauthenticated +RUN apt-get -t stretch-backports install -y default-jdk-headless + +# RUN apt update +# RUN apt install default-jdk-headless -y + +# # Set JAVA_HOME +# ENV JAVA_HOME /usr/lib/jvm/java-11-openjdk-amd64/ +# RUN export JAVA_HOME + RUN a2enmod rewrite RUN service apache2 restart RUN ln -sf /dev/stdout /var/log/apache2/access.log -RUN ln -sf /dev/stderr /var/log/apache2/error.log \ No newline at end of file +RUN ln -sf /dev/stderr /var/log/apache2/error.log + +RUN mkdir -p /var/www/html +COPY ./${sourceDirectory} /var/www/html + +USER root + +# #Set correct user permission to execute code +# #https://stackoverflow.com/questions/70360139/docker-php-apache-permission-denied +# RUN usermod -u ${uid} www-data && groupmod -g ${uid} www-data; + +## Disable coping config folder as this will be used by AWS EFS +# RUN mkdir -p /openmlconfig +# COPY ./api_key.txt /openmlconfig/api_key.txt + +# COPY ./entrypoint.sh /entrypoint.sh +# ENTRYPOINT 
["/entrypoint.sh"] \ No newline at end of file diff --git a/website/api_key.txt b/website/api_key.txt new file mode 100644 index 0000000..b8daff4 --- /dev/null +++ b/website/api_key.txt @@ -0,0 +1 @@ +0123456789abcdef0123456789abcdef \ No newline at end of file