Miscellany

 

LINUX

# list port

$ sudo netstat -nlpt | grep "LISTEN\|name" 

$ sudo lsof -i -n -P | grep "LISTEN\|NAME"

$ ps -ef | grep consumer_name

$ kill -9 PID


# allow a user to run sudo

$ sudo usermod -aG sudo <username>


# manage with PM2 and node

$ pm2 restart 7 --node-args="--max-http-header-size=81000" 

$ node --max-http-header-size=81000 app


# Create sudo user

$ adduser username


# grant sudo group membership with usermod

$ usermod -aG sudo username


# test user

$ su - username






SSH KEY

WINDOWS

Reference: https://phoenixnap.com/kb/generate-ssh-key-windows-10

# verify if OpenSSH Client is installed

Settings > Apps & features > Optional features => OpenSSH Client

# command prompt

Windows key> cmd > run as Administrator

# use open-ssh

ssh-keygen

# generate with putty

Save private key


# Start service agent
> Set-Service ssh-agent -StartupType Automatic
> Start-Service ssh-agent

# add ssh key
> ssh-add ~\.ssh\id_rsa_name


LINUX # generate key

$ ssh-keygen -t ed25519 -C "vanloi.elec@gmail.com"

# add key to .ssh

$ eval "$(ssh-agent -s)"


OS

# gen key

 ~ ssh-keygen -t rsa  

# add ssh

  ~ ssh-add .ssh/rsa_yourname  





JENKINS


1. On ubuntu based systems, run " $ sudo visudo "


2. this will open /etc/sudoers file.


3. If your Jenkins user is already in that file, then modify to look like this:


jenkins ALL=(ALL) NOPASSWD: ALL


4. save the file by doing Ctrl+O (don't save in temp file. save in /etc/sudoers, confirm overwrite)


5. Exit by doing Ctrl+X


6. Relaunch your Jenkins job 


7. You shouldn't see that error message again :)



DOCKER

# Set maxsize

$ sysctl -w vm.max_map_count=262144

 

# prune all volumes

$ docker system prune --all --volumes 

 

$ sudo netstat -tulpn | grep docker


$ sudo service docker restart 


$ sudo systemctl restart docker.service

 

# Docker-Proxy rm:

$ sudo service docker stop
$ sudo rm -f /var/lib/docker/network/files/local-kv.db

 

$ docker-compose -f custom-filename.yml up --force-recreate app


## open a shell inside the kafka container

$ docker exec -it kafka /bin/sh





# Create a Python virtual environment and activate it:

python3 -m venv env
source env/bin/activate

STRUCTURE
src/
  |- components/
  |   |- Button/
  |   |   |- Button.tsx
  |   |   |- Button.test.tsx
  |   |   |- Button.module.css
  |   |- Input/
  |       |- Input.tsx
  |       |- Input.test.tsx
  |       |- Input.module.css
  |- pages/
  |   |- Home/
  |   |   |- index.tsx
  |   |   |- Home.module.css
  |   |- About/
  |       |- index.tsx
  |       |- About.module.css
  |- services/
  |   |- api.ts
  |- store/
  |   |- actions/
  |   |   |- authActions.ts
  |   |   |- cartActions.ts
  |   |- reducers/
  |   |   |- authReducer.ts
  |   |   |- cartReducer.ts
  |   |- store.ts
  |- types/
  |   |- index.ts
  |   |- authTypes.ts
  |   |- cartTypes.ts
  |- utils/
  |   |- auth.ts
  |   |- formatPrice.ts
  |- App.tsx
  |- index.tsx


GATEWAY

## gateway

# You should look at the following URL's in order to grasp a solid understanding

# of Nginx configuration files in order to fully unleash the power of Nginx.

# http://wiki.nginx.org/Pitfalls

# http://wiki.nginx.org/QuickStart

# http://wiki.nginx.org/Configuration

#

# Generally, you will want to move this file somewhere, and start with a clean

# file but keep this around for reference. Or just disable in sites-enabled.

#

# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples.

##

 

# Default server configuration

#

server {

listen 80 default_server;

listen [::]:80 default_server;

 

# SSL configuration

#

# listen 443 ssl default_server;

# listen [::]:443 ssl default_server;

#

# Note: You should disable gzip for SSL traffic.

# See: https://bugs.debian.org/773332

#

# Read up on ssl_ciphers to ensure a secure configuration.

# See: https://bugs.debian.org/765782

#

# Self-signed certs generated by the ssl-cert package

# Don't use them in a production server!

#

# include snippets/snakeoil.conf;

 

root /var/www/html/dmp;

client_max_body_size 20M;

client_body_buffer_size 32k;

          client_header_buffer_size   8k;

          large_client_header_buffers 8 64k;

# Add index.php to the list if you are using PHP

index index.html index.htm index.nginx-debian.html;

 

server_name _;

 

location / {

# First attempt to serve request as file, then

# as directory, then fall back to displaying a 404.

try_files $uri $uri/ =404;

}

location /userrole/api/ {

proxy_pass http://localhost:8761/api/ ;

     }

     location /workflow/api/ {

             proxy_pass http://localhost:8765/api/ ;

     }

     location /cms/api/ {

              proxy_pass http://localhost:8770/api/ ;

     }

      location /company/api/ {

              proxy_pass http://localhost:8762/api/ ;

     }

location /credit/api/ {

proxy_pass http://localhost:8768/api/ ;

     }

     location /customer/api/ {

             proxy_pass http://localhost:8767/api/ ;

     }

     location /stockpricing/api/ {

              proxy_pass http://localhost:8769/api/ ;

     }

      location /auth/api/ {

              proxy_pass http://localhost:8760/api/ ;

     }

location /order/api/ {

              proxy_pass http://localhost:8764/api/ ;

     }

      location /product/api/ {

              proxy_pass http://localhost:8763/api/ ;

     }

# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000

#

#location ~ \.php$ {

#        include snippets/fastcgi-php.conf;

#

#        # With php7.0-cgi alone:

#        fastcgi_pass 127.0.0.1:9000;

#        # With php7.0-fpm:

#        fastcgi_pass unix:/run/php/php7.0-fpm.sock;

#}

 

# deny access to .htaccess files, if Apache's document root

# concurs with nginx's one

#

#location ~ /\.ht {

#        deny all;

#}

}

 

 

# Virtual Host configuration for example.com

#

# You can move that to a different file under sites-available/ and symlink that

# to sites-enabled/ to enable it.

#

#server {

#        listen 80;

#        listen [::]:80;

#

#        server_name example.com;

#

#        root /var/www/example.com;

#        index index.html;

#

#        location / {

#                try_files $uri $uri/ =404;

#        }

#}




KAFKA

# list all topic

## normal zookeeper

$ bin/kafka-topics.sh --list --zookeeper localhost:2181

## bootstrap

$ ./bin/kafka-topics.sh --list --bootstrap-server localhost:9092


## within docker

$ docker run --rm -it --net=host landoop/fast-data-dev kafka-topics --list --zookeeper localhost:2181

#create topic 

$ ./bin/kafka-topics.sh --create --zookeeper zookeeper:2181 --replication-factor 1 --partitions 1 --topic PRODUCT_ADD

# create topic with landoop/fast-data-dev

$ docker run --rm -it --net=host landoop/fast-data-dev kafka-topics --zookeeper 127.0.0.1:2181 --topic PRODUCT_ADD --replication-factor 1 --partitions 100 --create

# delete topic

## delete topic PRODUCT_UPDATE 

$ bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic PRODUCT_UPDATE


## delete topic PRODUCT_UPDATE within docker

$ docker run --rm -it --net=host landoop/fast-data-dev kafka-topics --zookeeper localhost:2181 --delete --topic PRODUCT_UPDATE


# configure topic data retention (delete data by time)

## normal

bin/kafka-configs --zookeeper localhost:2181 --alter --entity-type topics --entity-name $topicName --add-config retention.ms=1000

## with docker

docker run --rm -it --net=host landoop/fast-data-dev kafka-configs --zookeeper localhost:2181 --alter --entity-type topics --entity-name LOG_UPDATE --add-config retention.ms=100






GIT COMMAND

# BRANCHES

$ git branch List all local branches.

$ git branch -a List remote and local branches.

$ git checkout -b branch_name Create a local branch and switch to it.

$ git checkout branch_name Switch to an existing branch.

$ git push origin branch_name Push branch to remote.

$ git branch -m new_name Rename current branch.

$ git branch -d branch_name Delete a local branch.

$ git push origin :branch_name Delete a remote branch.

 

# LOGS

$ git log --oneline Show commit history in single lines.

$ git log -2 Show commit history for last N commits.

$ git log -p -2 Show commit history for last N commits with diff.

$ git diff Show all local file changes in the working tree.

$ git diff myfile Show changes made to a file.

$ git blame myfile Show who changed what & when in a file.

$ git remote show origin Show remote branches and their mapping to local.

## SHOW GRAPH

git log --all --decorate --oneline --graph

 

# CLEANUP

$ git clean -f Delete all untracked files.

$ git clean -df Delete all untracked files and directories.

$ git checkout -- . Undo local modifications to all files.

$ git reset HEAD myfile Unstage a file.

 

# TAGS

$ git pull --tags Get remote tags.

$ git checkout tag_name Switch to an existing tag.

$ git tag List all tags.

$ git tag -a tag_name -m "tag message" Create a new tag.

$ git push --tags Push all tags to the remote repo.

 

# STASHES

$ git stash push -m "stash name" Save changes to a stash (git stash save is deprecated).

$ git stash list List all stashes.

$ git stash pop Apply a stash and delete it from the stash list.


# COMMIT

$ git commit -m 'chore: commit-message-here'

Allowed <type> values:
# feat     — a new feature for the user (not for build scripts); triggers a release bumping a MINOR version.
# fix      — a bug fix for the user (not a build-script fix); triggers a release bumping a PATCH version.
# perf     — performance improvements; triggers a release bumping a PATCH version.
# docs     — changes to the documentation.
# style    — formatting changes, missing semicolons, etc.
# refactor — refactoring production code, e.g. renaming a variable.
# test     — adding missing tests or refactoring tests; no production code change.
# build    — updating build configuration, development tools or other changes irrelevant to the user.


# DELETE BRANCH
## Local
`
  $ git branch -d <Branch Name>
`

## Remote
`
 $ git push origin --delete <Branch Name>
 $ git fetch --all --prune
`

# WHEN PULL FAILURE
`
 $ git gc --prune=now
 $ git remote prune origin
`

# REBASE
```sh
$ git pull origin baokhiem/uat --rebase
```

# DELETE Branch from Local

ref: http://karma-runner.github.io/6.3/dev/git-commit-msg.html

# ON Windows
## When take issue: FileName is long
git config --system core.longpaths true

-------------------------------------------------------------

-------- ELASTIC INSTALLATION ----------------

-------------------------------------------------------------


---------------------------------------

-------- For Windows 10 --------

---------------------------------------

I. PreInstall requirement

1. Install python 3

Download: https://www.python.org/ftp/python/3.6.4/python-3.6.4-amd64.exe


2. Install mongo-connector

Run following command in CMD: 

-> pip3 install mongo-connector


3. Install doc-manager

Run following command in CMD as Administrator:

-> pip3 install 'elastic2-doc-manager[elastic5]'


4. Create Mongo replica set

- Stop mongoservice

- Run following command in CMD as Administrator: mongod --replSet myDevReplSet --dbpath C:\data\db --port 27017 --bind_ip localhost

- Open another CMD and run the following commands:

-> mongo

-> rs.initiate() 

Attention: if "rs.initiate()" fail then run "rs.initiate({ _id: 'myDevReplSet', version: 1, members: [{ _id: 1, host: '127.0.0.1:27017', priority: 10 }] })" 

5. Install JAVA 8:

Link download (Windows x64 Offline): http://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html


II. Install Elasticsearch

1. Download and install Elasticsearch client:

https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.6.5.msi

Attention: During Installation process, in plugin screen, choose "X-Pack"


III. Run Elasticsearch

1. Start product service and navigate to "http://localhost:9763/api/elastic/index/product"


2. Start mongo-connector by run following command in CMD:

-> mongo-connector -m localhost:27017 -t elastic:changeme@127.0.0.1:9200 -d elastic2_doc_manager -n Product.product


Done!!


---------------------------------------

---------------------------------------

-------- For Ubuntu 16.04 ------

---------------------------------------

I. PreInstall requirement

1. Install python 3

-> sudo apt install python3


2. Install mongo-connector

-> pip3 install mongo-connector


3. Install doc-manager

Run following command in the terminal:

-> pip3 install 'elastic2-doc-manager[elastic5]'


4. Create Mongo replica set

-> sudo service mongod stop

-> sudo mongod --replSet myDevReplSet --dbpath /var/lib/mongodb --port 27017 --bind_ip localhost

- Open another terminal and run the following commands:

-> mongo

-> rs.initiate() 

Attention: if "rs.initiate()" fail then run "rs.initiate({ _id: 'myDevReplSet', version: 1, members: [{ _id: 1, host: '127.0.0.1:27017', priority: 10 }] })" 


5. Install JAVA 8:

-> sudo add-apt-repository ppa:webupd8team/java

-> sudo apt-get update

-> sudo apt-get install oracle-java8-installer


II. Install Elasticsearch

1. Download and install Elasticsearch client:

https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.6.5.tar.gz


2. Unzip

-> sudo tar -xvf elasticsearch-5.6.5.tar.gz


3. Install X-pack

-> cd elasticsearch-5.6.5

-> ./bin/elasticsearch-plugin install x-pack


III. Run Elasticsearch

1. Start elasticsearch service

-> ./bin/elasticsearch &


2. Start product service and navigate to "http://localhost:9763/api/elastic/index/product"


3. Start mongo-connector by run following command in CMD:

-> mongo-connector -m localhost:27017 -t elastic:changeme@127.0.0.1:9200 -d elastic2_doc_manager -n Product.product


Done!!


####
Install Chocolatey with powershell.exe
#######

Run 
    `Get-ExecutionPolicy`. 
If it returns Restricted, 
then run 
    `Set-ExecutionPolicy AllSigned` OR
    `Set-ExecutionPolicy Bypass -Scope Process`.

 > Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))



-------------------------------------------------------------

-------- REMMINA remote Ubuntu ------------

-------------------------------------------------------------

sudo apt-add-repository ppa:remmina-ppa-team/remmina-next
sudo apt update
sudo apt install remmina remmina-plugin-rdp remmina-plugin-secret


-- # Can't access google.com

# configuration

$ sudo vim /etc/resolv.conf

# add preferred DNS server

nameserver 208.67.222.222


-- # allow remote access to mysql

# Navigate to bind-address, and change to 0.0.0.0

$ sudo nano /etc/mysql/mysql.conf.d/mysqld.cnf

$ sudo systemctl restart mysql

$ sudo mysql

# rename user

sql> RENAME USER 'devmy'@'localhost' TO 'devmy'@'remote_server_ip';

# or create user

sql> CREATE USER 'sammy'@'remote_server_ip' IDENTIFIED BY 'password';

# grant permission

sql> GRANT CREATE, ALTER, DROP, INSERT, UPDATE, DELETE, SELECT, REFERENCES, RELOAD on *.* TO 'sammy'@'remote_server_ip' WITH GRANT OPTION;

# free up

sql> FLUSH PRIVILEGES;


# Sometime you have the issue which related to registry on Nodejs

```sh

npm get registry
npm set registry https://registry.npmjs.org/
```


III. AWS


# Create a Config Profile > aws configure --profile profile_name


# ModifyProfile
> aws configure set aws_access_key_id NEW_ACCESS_KEY_ID --profile profile_name

#
  1. Terraform VPC AWS
    1. Networking, sub-net, nat (single, multiple), security group, subnet,…
    2. Route 53, firewall (WAF), 
  2. Terraform EC2
  3. Terraform EKS, RDS, S3
  4. Sonaqube
    1. Install ?-> can be able to deploy it via K8S
    2. ELK is required?
    3. Need Postgres only
  5. K8S (AKS) should prefer course
    1. POD, Deployment, Service, Ingress, load balancer (Isito), Network of policy 
    2. Helm
    3. Certificate to add ssl
    4. Auto scaling
  6. Monitoring (
    1. Grafana, Prometheus 
    2. Storing data -> sdd, s3
  7. Argo CD
 


-------------------------------------------------------------

-------- PRODUCTS DB PATTERN ------------

-------------------------------------------------------------


 +---------------+     +-------------------+
 | PRODUCTS      |-----< PRODUCT_VARIANTS  |
 +---------------+     +-------------------+
 | #product_id   |     | #product_id       |
 |  product_name |     | #variant_id       |
 +---------------+     |  sku_id           |
         |             +-------------------+
         |                       |          
+--------^--------+     +--------^--------+ 
| PRODUCT_OPTIONS |-----< VARIANT_VALUES  | 
+-----------------+     +-----------------+ 
| #product_id     |     | #product_id     | 
| #option_id      |     | #variant_id     | 
+--------v--------+     | #option_id      | 
         |              |  value_id       | 
+-----------------+     +--------v--------+ 
| OPTIONS         |              |          
+-----------------+              |          
| #option_id      |              |          
|  option_name    |              |          
+-----------------+              |          
         |                       |          
 +-------^-------+               |          
 | OPTION_VALUES |---------------+          
 +---------------+                          
 | #option_id    |                          
 | #value_id     |                          
 |  value_name   |                          
 +---------------+    

PRODUCTS
========
product_id product_name
---------- ------------
1          Widget 1
2          Widget 2
3          Widget 3
 
OPTIONS
=======
option_id option_name
--------- -----------
1         Size SL
2         Color
3         Size SM
4         Class
5         Size ML
 
OPTION_VALUES
=============
option_id value_id value_name
--------- -------- ------------
1         1        Small        (Size SL)
1         2        Large        (Size SL)
2         1        White        (Color)
2         2        Black        (Color)
3         1        Small        (Size SM)
3         2        Medium       (Size SM)
4         1        Amateur      (Class)
4         2        Professional (Class)
5         1        Medium       (Size ML)
5         2        Large        (Size ML)
 
PRODUCT_OPTIONS
===============
product_id option_id
---------- ---------
1          1         (Widget 1; Size SL)
1          2         (Widget 1; Color)
2          3         (Widget 2; Size SM)
3          4         (Widget 3; Class)
3          5         (Widget 3; Size ML)
 
PRODUCT_VARIANTS
================
product_id variant_id sku_id
---------- ---------- ------
1          1          W1SSCW (Widget 1)
1          2          W1SSCB (Widget 1)
1          3          W1SLCW (Widget 1)
1          4          W1SLCB (Widget 1)
2          1          W2SS   (Widget 2)
2          2          W2SM   (Widget 2)
3          1          W3CASM (Widget 3)
3          2          W3CASL (Widget 3)
3          3          W3CPSM (Widget 3)
3          4          W3CPSL (Widget 3)
 
VARIANT_VALUES
==============
product_id variant_id option_id value_id
---------- ---------- --------- --------
1          1          1         1        (W1SSCW; Size SL; Small)
1          1          2         1        (W1SSCW; Color; White)
1          2          1         1        (W1SSCB; Size SL; Small)
1          2          2         2        (W1SSCB; Color; Black)
1          3          1         2        (W1SLCW; Size SL; Large)
1          3          2         1        (W1SLCW; Color; White)
1          4          1         2        (W1SLCB; Size SL; Large)
1          4          2         2        (W1SLCB; Color; Black)
2          1          3         1        (W2SS; Size SM; Small)
2          2          3         2        (W2SM; Size SM; Medium)
3          1          4         1        (W3CASM; Class; Amateur)
3          1          5         1        (W3CASM; Size ML; Medium)
3          2          4         1        (W3CASL; Class; Amateur)
3          2          5         2        (W3CASL; Size ML; Large)
3          3          4         2        (W3CPSM; Class; Professional)
3          3          5         1        (W3CPSM; Size ML; Medium)
3          4          4         2        (W3CPSL; Class; Professional)
3          4          5         2        (W3CPSL; Size ML; Large)

======================================
K8S what you will need is

AWS account 
virtual machine 
putty or any ssh client

##TERRAFORM INSTALLATION

sudo apt-get update && sudo apt-get install -y gnupg software-properties-common

wget -O- https://apt.releases.hashicorp.com/gpg | \
    gpg --dearmor | \
    sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg

gpg --no-default-keyring \
    --keyring /usr/share/keyrings/hashicorp-archive-keyring.gpg \
    --fingerprint

echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] \
    https://apt.releases.hashicorp.com $(lsb_release -cs) main" | \
    sudo tee /etc/apt/sources.list.d/hashicorp.list

sudo apt update

sudo apt-get install terraform

#AWS CLI

apt install unzip

curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install

sudo ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update

aws configure

git clone https://github.com/hashicorp/learn-terraform-provision-eks-cluster

cd learn-terraform-provision-eks-cluster

# comment out the cloud configuration block in terraform.tf

terraform init

terraform apply

curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

snap install kubectl --classic
kubectl version --client

aws eks --region $(terraform output -raw region) update-kubeconfig \
    --name $(terraform output -raw cluster_name)

kubectl cluster-info
kubectl get nodes

kubectl create namespace argocd
   

kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml

kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'

# get the initial admin password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo

# teardown
kubectl delete -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
kubectl delete namespace argocd
terraform destroy --auto-approve

The OCR Service to extract the Text Data

Optical character recognition, or OCR, is a key tool for people who want to build or collect text data. OCR uses machine learning to extract...