Установить PostgreSQL версии 15 по инструкции с официального сайта: https://www.postgresql.org/download
Выполнить тонкую настройку в postgresql.conf по рекомендациям с https://www.pgconfig.org, введя выделенные ресурсы, а также привести значение шифрования пароля к password_encryption = scram-sha-256 или md5.
Выполнить настройку файла pg_hba.conf, указав:
<type> <database> <user> <address> <method>
где:
type - параметр, указывающий тип подключения (local, host, hostssl, hostnossl, hostgssenc, hostnogssenc)
database - параметр, указывающий к какой БД разрешено подключаться, так же может быть all
user- параметр, указывающий каким пользователем можно подключаться, так же может быть all
address - параметр, указывающий, с какого IP-адреса или подсети можно подключаться; для разрешения любого адреса используется all или 0.0.0.0/0 (значение * в этом поле недопустимо)
method - параметр, указывающий метод аутентификации, в нашем случае scram-sha-256 или md5, смотря какое значение password_encryption было указано в postgresql.conf
Перезапустить службу PostgreSQL
systemctl restart postgresql-15.service
Переключиться в пользователя postgres и создать роль и необходимые базы на сервере:
sudo su - postgres
psql
CREATE ROLE m2 WITH SUPERUSER LOGIN PASSWORD 'superPassw0rd';
CREATE DATABASE m2 WITH OWNER m2;
CREATE DATABASE keycloak WITH OWNER m2;
\c m2;
CREATE SCHEMA m2;
exit
Для установки потребуется программное обеспечение для автоматизации развёртывания и управления приложениями в среде контейнеризации - Docker. Установить, для вашей платформы, можно по инструкции https://docs.docker.com/engine/install/ или любым другим способом, также, необходимым пакетом будет являться docker-compose.
Все необходимые образы будут переданы. Чтобы их загрузить необходимо выполнить команду:
docker load -i <image_name>.tar
mkdir -p /opt/semanticiam && cd $_
# --- Stage 1: augment the base Keycloak image and pre-build its config ---
FROM nexus.sdi-solution.ru/infra/keycloak:25.0.0-core AS builder
# Configure postgres database vendor
ENV KC_DB=postgres
# Enable token-exchange / scripts / preview features; disable organizations.
ENV KC_FEATURES="token-exchange,scripts,preview"
ENV KC_FEATURES_DISABLED="organization"
WORKDIR /opt/keycloak
# Settings file consumed at runtime by the m2 SPI providers (see the
# --spi-* options passed in the compose `command`).
COPY kc-m2-provider.json providers/kc-m2-provider.json
# COPY sdi.keytab sdi.keytab
# Pre-build the optimized server configuration with health/metrics endpoints.
RUN /opt/keycloak/bin/kc.sh build --health-enabled=true --metrics-enabled=true
# --- Stage 2: final runtime image ----------------------------------------
FROM nexus.sdi-solution.ru/infra/keycloak:25.0.0-core AS final
LABEL image.version=25.0.0
COPY --from=builder /opt/keycloak/ /opt/keycloak/
# Remove " SHA1," from the JDK's disabledAlgorithms list, re-enabling SHA1
# signatures (workaround discussed in the linked Keycloak issue —
# presumably needed for Kerberos/legacy interop; confirm before removing).
# https://github.com/keycloak/keycloak/issues/19185#issuecomment-1480763024
USER root
RUN sed -i '/disabledAlgorithms/ s/ SHA1,//' /etc/crypto-policies/back-ends/java.config
USER keycloak
# Dump the effective configuration into the build log for troubleshooting.
RUN /opt/keycloak/bin/kc.sh show-config
ENTRYPOINT ["/opt/keycloak/bin/kc.sh"]
# Fluent Bit pipeline for Keycloak user-journal events.
# Replace every <angle-bracket> placeholder before deployment (the values
# are described in the parameter list that follows this snippet).
[INPUT]
# Accept records over the Forward protocol on all interfaces, TCP 24224;
# every incoming tag gets the "kcEvent." prefix so the OUTPUT can match it.
Name forward
Listen 0.0.0.0
Port 24224
Tag_Prefix kcEvent.
[OUTPUT]
# Ship matching records into the OpenSearch index "user_journal".
Name opensearch
Match kcEvent.<wf_instance_name>
Host <opensearch_ip>
Port <opensearch_port>
HTTP_User <user>
HTTP_Passwd <password>
Index user_journal
# Let Fluent Bit generate document IDs (avoids duplicates on retry).
Generate_ID On
# Required for OpenSearch / Elasticsearch 8+ (no mapping types).
Suppress_Type_Name On
Logstash_Format Off
wf_instance_name - имя, обычно соответствует названию проекта
opensearch_ip - ip адрес сервера opensearch
opensearch_port - порт сервера opensearch
user - пользователь opensearch (admin)
password - пароль пользователя opensearch (admin)
{
"realms": {
"<realm_name>": {
"clientIds": [
"<front_client>",
"<back_client>"
],
"backend": "<semantic_uri>",
"useSpnego": <true/false>
}
},
"flbHost": "<fluent_ip>",
"flbPort": 24222
}
realm_name - название реалма в Keycloak
front_client - имя front клиента, такое же указать и в скрипте создания реалма
back_client - имя back клиента, такое же указать и в скрипте создания реалма
semantic_uri - адрес Semantic
fluent_ip - IP адрес fluent-bit
useSpnego - включает/выключает видимость кнопки войти через Windows
# Compose stack: Keycloak (built from the Dockerfile above) + Fluent Bit.
services:
  keycloak:
    container_name: keycloak
    build:
      context: .
    # Start the pre-built (--optimized) server; the three --spi-* options
    # point the m2 providers at the shared settings file copied in the image.
    command: ['start', '--optimized', '--spi-required-action-signal-m2-auth-settings=/opt/keycloak/providers/kc-m2-provider.json', '--spi-events-listener-m2-event-listener-settings=/opt/keycloak/providers/kc-m2-provider.json', '--spi-storage-m2-db-user-federation-settings=/opt/keycloak/providers/kc-m2-provider.json']
    environment:
      # Remote-debug agent on 8787 (published as host port 9787 below).
      JAVA_OPTS_APPEND: -Dkeycloak.profile.feature.upload_scripts=enabled -agentlib:jdwp=transport=dt_socket,address=keycloak:8787,server=y,suspend=n
      KC_DB: postgres
      # Fill in: jdbc:postgresql://<DB_IP>:<DB_PORT>/<DB_NAME>
      KC_DB_URL: ""
      KC_DB_USERNAME:
      KC_DB_PASSWORD:
      KC_HOSTNAME:
      # KC_HOSTNAME_PORT: 8080
      KC_HOSTNAME_URL:
      # KC_HTTP_RELATIVE_PATH: /
      # KC_HOSTNAME_ADMIN_URL:
      # Quoted: environment values must be strings, not YAML booleans.
      KC_HTTP_ENABLED: "true"
      KC_PROXY_HEADERS: xforwarded
      # KC_HOSTNAME_STRICT: false
      # KC_HOSTNAME_STRICT_HTTPS: false
      # KC_LOG_LEVEL: info
      # KC_LOG_LEVEL: debug
      KC_METRICS_ENABLED: "true"
      KC_HEALTH_ENABLED: "true"
      KEYCLOAK_ADMIN:
      KEYCLOAK_ADMIN_PASSWORD:
    ports:
      # Quoted to avoid YAML's sexagesimal/number parsing of "a:b" values.
      - "9080:8080"
      - "9787:8787"
    networks:
      # Fixed: was "kc", which is not declared in the top-level `networks`
      # section and made `docker compose up` fail.
      - keycloak
  fluent-bit:
    container_name: fluent-bit
    image: fluent/fluent-bit
    ports:
      - "24222:24224"
    volumes:
      - ./fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
    networks:
      - keycloak
networks:
  keycloak:
    name: keycloak
hostname: указать FQDN, который должен совпадать с KC_HOSTNAME, KC_HOSTNAME_URL
KC_HOSTNAME: указать FQDN, который должен совпадать с hostname, KC_HOSTNAME_URL
KC_DB_URL: "jdbc:postgresql://<DB_IP>:<DB_PORT>/<DB_NAME>" - параметры подключения к БД
KC_DB_USERNAME: Пользователь БД с доступом к бд keycloak
KC_DB_PASSWORD: Пароль пользователя БД с доступом к бд keycloak
KC_HOSTNAME_URL: http:// | https:// - Полный адрес ресурса, по которому будет доступен Keycloak (необходимо добавить DNS запись и конфигурацию в Nginx, чувствительная настройка)
KC_HTTP_RELATIVE_PATH: / (дефолтное значение), раскомментировать и написать нужный location, если нужно повесить на существующую запись, например https://test.sdi-solution.ru/keycloak
KEYCLOAK_ADMIN: Пользователь, который создастся в Keycloak c администраторскими правами
KEYCLOAK_ADMIN_PASSWORD: Пароль пользователя, который создастся в Keycloak c администраторскими правами
docker compose build
docker compose up -d
touch semantic-realm.sh && chmod +x semantic-realm.sh
#!/bin/bash
# Bootstraps the Semantic realm in Keycloak through the Admin REST API.
# Requires curl and jq. Fill in every empty variable before running.
# Endpoints: Keycloak base URL, Semantic base URL, and Semantic's plain-HTTP
# address — all three are used later as client redirect URIs.
keycloakURI=
semanticURI=
semanticHTTP=
# Realm used to authenticate the admin (master) and the realm to be created.
adminRealm=master
semanticRealm=
# Built-in CLI client used for the password grant below.
adminClient=admin-cli
# Client IDs/secrets for the front, back and ML clients created in the realm.
frontClient=sdi-web-client
backClient=sdi-core-client
backSecret=
mlClient=sdi-ml-client
mlSecret=
# Credentials of an admin user in the ${adminRealm} realm.
username=
userpass=
# Semantic system roles; consumed further down the script (not shown here).
systemRoles="read,editData,editMetadata,sysAdministration,exportPatterns,viewHistory,manageApplications,editDataWithoutApplications,runReports,rootNodeAccess,readSecurity,manageSecurity,readUserJournal,similaritySearch,editExpirables,sudo,viceSetup"
# Get bearer token for other Web API calls
# (OAuth2 resource-owner password grant against ${adminRealm}; jq extracts
# .access_token from the JSON response).
token=$(curl --silent -X POST "${keycloakURI}/realms/${adminRealm}/protocol/openid-connect/token" \
--header "Content-Type: application/x-www-form-urlencoded" \
--data "client_id=${adminClient}" \
--data "username=${username}" \
--data "password=${userpass}" \
--data "grant_type=password" | jq -r .access_token)
# Pass token for create a predefined realm for Semantic use
curl -X POST "${keycloakURI}/admin/realms" \
--header "Authorization: Bearer ${token}" \
--header "Content-Type: application/json" \
--data '{
"id": "'"${semanticRealm}"'",
"realm": "'"${semanticRealm}"'",
"loginTheme": "semantic-mdm",
"notBefore": 0,
"defaultSignatureAlgorithm": "RS256",
"revokeRefreshToken": false,
"refreshTokenMaxReuse": 0,
"accessTokenLifespan": 300,
"accessTokenLifespanForImplicitFlow": 900,
"ssoSessionIdleTimeout": 1800,
"ssoSessionMaxLifespan": 36000,
"ssoSessionIdleTimeoutRememberMe": 0,
"ssoSessionMaxLifespanRememberMe": 0,
"offlineSessionIdleTimeout": 2592000,
"offlineSessionMaxLifespanEnabled": false,
"offlineSessionMaxLifespan": 5184000,
"accessCodeLifespan": 60,
"accessCodeLifespanUserAction": 300,
"accessCodeLifespanLogin": 1800,
"actionTokenGeneratedByAdminLifespan": 43200,
"actionTokenGeneratedByUserLifespan": 300,
"enabled": true,
"sslRequired": "external",
"registrationAllowed": false,
"registrationEmailAsUsername": false,
"rememberMe": false,
"verifyEmail": false,
"loginWithEmailAllowed": false,
"duplicateEmailsAllowed": true,
"resetPasswordAllowed": false,
"editUsernameAllowed": false,
"bruteForceProtected": false,
"permanentLockout": false,
"maxTemporaryLockouts": 0,
"maxFailureWaitSeconds": 900,
"minimumQuickLoginWaitSeconds": 60,
"waitIncrementSeconds": 60,
"quickLoginCheckMilliSeconds": 1000,
"maxDeltaTimeSeconds": 43200,
"failureFactor": 30,
"passwordPolicy": "m2-password-policy",
"eventsListeners": [
"m2-event-listener",
"jboss-logging"
],
"requiredActions": [
{
"alias": "CONFIGURE_TOTP",
"name": "Configure OTP",
"providerId": "CONFIGURE_TOTP",
"enabled": true,
"defaultAction": false,
"priority": 10,
"config": {}
},
{
"alias": "TERMS_AND_CONDITIONS",
"name": "Terms and Conditions",
"providerId": "TERMS_AND_CONDITIONS",
"enabled": false,
"defaultAction": false,
"priority": 20,
"config": {}
},
{
"alias": "UPDATE_PASSWORD",
"name": "Update Password",
"providerId": "UPDATE_PASSWORD",
"enabled": true,
"defaultAction": false,
"priority": 30,
"config": {}
},
{
"alias": "UPDATE_PROFILE",
"name": "Update Profile",
"providerId": "UPDATE_PROFILE",
"enabled": true,
"defaultAction": false,
"priority": 40,
"config": {}
},
{
"alias": "VERIFY_EMAIL",
"name": "Verify Email",
"providerId": "VERIFY_EMAIL",
"enabled": false,
"defaultAction": false,
"priority": 50,
"config": {}
},
{
"alias": "delete_account",
"name": "Delete Account",
"providerId": "delete_account",
"enabled": false,
"defaultAction": false,
"priority": 60,
"config": {}
},
{
"alias": "CONFIGURE_RECOVERY_AUTHN_CODES",
"name": "Recovery Authentication Codes",
"providerId": "CONFIGURE_RECOVERY_AUTHN_CODES",
"enabled": true,
"defaultAction": false,
"priority": 70,
"config": {}
},
{
"alias": "UPDATE_EMAIL",
"name": "Update Email",
"providerId": "UPDATE_EMAIL",
"enabled": true,
"defaultAction": false,
"priority": 70,
"config": {}
},
{
"alias": "webauthn-register",
"name": "Webauthn Register",
"providerId": "webauthn-register",
"enabled": true,
"defaultAction": false,
"priority": 70,
"config": {}
},
{
"alias": "webauthn-register-passwordless",
"name": "Webauthn Register Passwordless",
"providerId": "webauthn-register-passwordless",
"enabled": true,
"defaultAction": false,
"priority": 80,
"config": {}
},
{
"alias": "VERIFY_PROFILE",
"name": "Verify Profile",
"providerId": "VERIFY_PROFILE",
"enabled": false,
"defaultAction": false,
"priority": 90,
"config": {}
},
{
"alias": "delete_credential",
"name": "Delete Credential",
"providerId": "delete_credential",
"enabled": true,
"defaultAction": false,
"priority": 100,
"config": {}
},
{
"alias": "update_user_locale",
"name": "Update User Locale",
"providerId": "update_user_locale",
"enabled": true,
"defaultAction": false,
"priority": 1000,
"config": {}
},
{
"alias": "m2-update-password",
"name": "Обновить пароль",
"providerId": "m2-update-password",
"enabled": true,
"defaultAction": false,
"priority": 1001,
"config": {}
}
],
"components": {
"org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy": [
{
"name": "Allowed Protocol Mapper Types",
"providerId": "allowed-protocol-mappers",
"subType": "anonymous",
"subComponents": {},
"config": {
"allowed-protocol-mapper-types": [
"oidc-usermodel-property-mapper",
"oidc-sha256-pairwise-sub-mapper",
"saml-user-attribute-mapper",
"oidc-usermodel-attribute-mapper",
"saml-user-property-mapper",
"oidc-full-name-mapper",
"oidc-address-mapper",
"saml-role-list-mapper"
]
}
},
{
"name": "Trusted Hosts",
"providerId": "trusted-hosts",
"subType": "anonymous",
"subComponents": {},
"config": {
"host-sending-registration-request-must-match": [
"true"
],
"client-uris-must-match": [
"true"
]
}
},
{
"name": "Full Scope Disabled",
"providerId": "scope",
"subType": "anonymous",
"subComponents": {},
"config": {}
},
{
"name": "Allowed Protocol Mapper Types",
"providerId": "allowed-protocol-mappers",
"subType": "authenticated",
"subComponents": {},
"config": {
"allowed-protocol-mapper-types": [
"saml-user-property-mapper",
"oidc-sha256-pairwise-sub-mapper",
"oidc-full-name-mapper",
"saml-role-list-mapper",
"oidc-usermodel-property-mapper",
"oidc-address-mapper",
"saml-user-attribute-mapper",
"oidc-usermodel-attribute-mapper"
]
}
},
{
"name": "Allowed Client Scopes",
"providerId": "allowed-client-templates",
"subType": "anonymous",
"subComponents": {},
"config": {
"allow-default-scopes": [
"true"
]
}
},
{
"name": "Max Clients Limit",
"providerId": "max-clients",
"subType": "anonymous",
"subComponents": {},
"config": {
"max-clients": [
"200"
]
}
},
{
"name": "Consent Required",
"providerId": "consent-required",
"subType": "anonymous",
"subComponents": {},
"config": {}
},
{
"name": "Allowed Client Scopes",
"providerId": "allowed-client-templates",
"subType": "authenticated",
"subComponents": {},
"config": {
"allow-default-scopes": [
"true"
]
}
}
],
"org.keycloak.userprofile.UserProfileProvider": [
{
"providerId": "declarative-user-profile",
"config": {
"kc.user.profile.config": [
"{\"attributes\":[{\"name\":\"username\",\"displayName\":\"${username}\",\"validations\":{},\"annotations\":{},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false},{\"name\":\"email\",\"displayName\":\"${email}\",\"validations\":{},\"annotations\":{},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false},{\"name\":\"firstName\",\"displayName\":\"${firstName}\",\"validations\":{},\"annotations\":{},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false},{\"name\":\"lastName\",\"displayName\":\"${lastName}\",\"validations\":{},\"annotations\":{},\"required\":{\"roles\":[\"user\"]},\"permissions\":{\"view\":[\"admin\",\"user\"],\"edit\":[\"admin\",\"user\"]},\"multivalued\":false}],\"groups\":[{\"name\":\"user-metadata\",\"displayHeader\":\"User metadata\",\"displayDescription\":\"Attributes, which refer to user metadata\"}],\"unmanagedAttributePolicy\":\"ENABLED\"}"
]
}
}
],
"org.keycloak.storage.UserStorageProvider": [
{
"name": "m2-db-user-federation",
"providerId": "m2-db-user-federation",
"subComponents": {},
"config": {
"cachePolicy": [
"DEFAULT"
],
"enabled": [
"true"
]
}
}
]
},
"clients": [
{
"clientId": "'"${backClient}"'",
"name": "'"${backClient}"'",
"description": "Client for SemanticMDM",
"rootUrl": "'"${keycloakURI}"'",
"adminUrl": "'"${keycloakURI}"'",
"baseUrl": "'"${semanticURI}"'",
"surrogateAuthRequired": false,
"enabled": true,
"alwaysDisplayInConsole": false,
"clientAuthenticatorType": "client-secret",
"secret": "'"${backSecret}"'",
"redirectUris": [
"'"${keycloakURI}"'/*",
"'"${semanticURI}"'/*",
"'"${semanticHTTP}"'/*"
],
"webOrigins": [
"*"
],
"notBefore": 0,
"bearerOnly": false,
"consentRequired": false,
"standardFlowEnabled": true,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"serviceAccountsEnabled": true,
"authorizationServicesEnabled": true,
"publicClient": false,
"frontchannelLogout": true,
"protocol": "openid-connect",
"attributes": {
"oidc.ciba.grant.enabled": "false",
"client.secret.creation.time": "1727124444",
"backchannel.logout.session.required": "true",
"post.logout.redirect.uris": "+",
"display.on.consent.screen": "false",
"oauth2.device.authorization.grant.enabled": "false",
"backchannel.logout.revoke.offline.tokens": "false"
},
"authenticationFlowBindingOverrides": {},
"fullScopeAllowed": false,
"nodeReRegistrationTimeout": -1,
"defaultClientScopes": [
"web-origins",
"acr",
"profile",
"roles",
"group-mapper",
"basic",
"email",
"ip-mapper"
],
"optionalClientScopes": [
"address",
"phone",
"offline_access",
"organization",
"microprofile-jwt"
],
"protocolMappers": [
{
"name": "Client ID",
"protocol": "openid-connect",
"protocolMapper": "oidc-usersessionmodel-note-mapper",
"consentRequired": false,
"config": {
"user.session.note": "client_id",
"id.token.claim": "true",
"introspection.token.claim": "true",
"access.token.claim": "true",
"claim.name": "client_id",
"jsonType.label": "String"
}
}
]
},
{
"clientId": "'"${mlClient}"'",
"name": "'"${mlClient}"'",
"description": "Client for SemanticMDM",
"rootUrl": "'"${keycloakURI}"'",
"adminUrl": "'"${keycloakURI}"'",
"baseUrl": "'"${semanticURI}"'",
"surrogateAuthRequired": false,
"enabled": true,
"alwaysDisplayInConsole": false,
"clientAuthenticatorType": "client-secret",
"secret": "'"${mlSecret}"'",
"redirectUris": [
"'"${keycloakURI}"'/*",
"'"${semanticURI}"'/*",
"'"${semanticHTTP}"'/*"
],
"webOrigins": [
"*"
],
"notBefore": 0,
"bearerOnly": false,
"consentRequired": false,
"standardFlowEnabled": true,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"serviceAccountsEnabled": true,
"authorizationServicesEnabled": true,
"publicClient": false,
"frontchannelLogout": true,
"protocol": "openid-connect",
"attributes": {
"oidc.ciba.grant.enabled": "false",
"client.secret.creation.time": "1727124444",
"backchannel.logout.session.required": "true",
"post.logout.redirect.uris": "+",
"display.on.consent.screen": "false",
"oauth2.device.authorization.grant.enabled": "false",
"backchannel.logout.revoke.offline.tokens": "false"
},
"authenticationFlowBindingOverrides": {},
"fullScopeAllowed": true,
"nodeReRegistrationTimeout": -1,
"defaultClientScopes": [
"web-origins",
"acr",
"profile",
"roles",
"group-mapper",
"basic",
"email",
"ip-mapper"
],
"optionalClientScopes": [
"address",
"phone",
"offline_access",
"organization",
"microprofile-jwt"
],
"protocolMappers": [
{
"name": "Client ID",
"protocol": "openid-connect",
"protocolMapper": "oidc-usersessionmodel-note-mapper",
"consentRequired": false,
"config": {
"user.session.note": "client_id",
"id.token.claim": "true",
"introspection.token.claim": "true",
"access.token.claim": "true",
"claim.name": "client_id",
"jsonType.label": "String"
}
}
]
},
{
"clientId": "'"${frontClient}"'",
"name": "'"${frontClient}"'",
"description": "Client for SemanticMDM",
"rootUrl": "'"${semanticURI}"'",
"adminUrl": "'"${semanticURI}"'",
"baseUrl": "'"${semanticURI}"'",
"surrogateAuthRequired": false,
"enabled": true,
"alwaysDisplayInConsole": false,
"clientAuthenticatorType": "client-secret",
"redirectUris": [
"'"${keycloakURI}"'/*",
"'"${semanticURI}"'/*",
"'"${semanticHTTP}"'/*"
],
"webOrigins": [
"*"
],
"notBefore": 0,
"bearerOnly": false,
"consentRequired": false,
"standardFlowEnabled": true,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"serviceAccountsEnabled": false,
"publicClient": true,
"frontchannelLogout": true,
"protocol": "openid-connect",
"attributes": {
"oidc.ciba.grant.enabled": "false",
"backchannel.logout.session.required": "true",
"post.logout.redirect.uris": "+",
"display.on.consent.screen": "false",
"oauth2.device.authorization.grant.enabled": "false",
"backchannel.logout.revoke.offline.tokens": "false"
},
"authenticationFlowBindingOverrides": {},
"fullScopeAllowed": true,
"nodeReRegistrationTimeout": -1,
"defaultClientScopes": [
"web-origins",
"acr",
"profile",
"roles",
"group-mapper",
"basic",
"email",
"ip-mapper"
],
"optionalClientScopes": [
"address",
"phone",
"offline_access",
"organization",
"microprofile-jwt"
]
}
],
"clientScopes": [
{
"name": "saml_organization",
"description": "Organization Membership",
"protocol": "saml",
"attributes": {
"display.on.consent.screen": "false"
},
"protocolMappers": [
{
"name": "organization",
"protocol": "saml",
"protocolMapper": "saml-organization-membership-mapper",
"consentRequired": false,
"config": {}
}
]
},
{
"name": "profile",
"description": "OpenID Connect built-in scope: profile",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"consent.screen.text": "${profileScopeConsentText}",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "website",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "website",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "website",
"jsonType.label": "String"
}
},
{
"name": "zoneinfo",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "zoneinfo",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "zoneinfo",
"jsonType.label": "String"
}
},
{
"name": "picture",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "picture",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "picture",
"jsonType.label": "String"
}
},
{
"name": "given name",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "firstName",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "given_name",
"jsonType.label": "String"
}
},
{
"name": "gender",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "gender",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "gender",
"jsonType.label": "String"
}
},
{
"name": "middle name",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "middleName",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "middle_name",
"jsonType.label": "String"
}
},
{
"name": "locale",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "locale",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "locale",
"jsonType.label": "String"
}
},
{
"name": "groups",
"protocol": "openid-connect",
"protocolMapper": "oidc-group-membership-mapper",
"consentRequired": false,
"config": {
"full.path": "false",
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"multivalued": "true",
"id.token.claim": "true",
"lightweight.claim": "false",
"access.token.claim": "true",
"claim.name": "groups"
}
},
{
"name": "birthdate",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "birthdate",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "birthdate",
"jsonType.label": "String"
}
},
{
"name": "username",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "username",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "preferred_username",
"jsonType.label": "String"
}
},
{
"name": "updated at",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "updatedAt",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "updated_at",
"jsonType.label": "long"
}
},
{
"name": "family name",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "lastName",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "family_name",
"jsonType.label": "String"
}
},
{
"name": "profile",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "profile",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "profile",
"jsonType.label": "String"
}
},
{
"name": "full name",
"protocol": "openid-connect",
"protocolMapper": "oidc-full-name-mapper",
"consentRequired": false,
"config": {
"id.token.claim": "true",
"introspection.token.claim": "true",
"access.token.claim": "true",
"userinfo.token.claim": "true"
}
},
{
"name": "nickname",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "nickname",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "nickname",
"jsonType.label": "String"
}
}
]
},
{
"name": "microprofile-jwt",
"description": "Microprofile - JWT built-in scope",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"display.on.consent.screen": "false"
},
"protocolMappers": [
{
"name": "groups",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-realm-role-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"multivalued": "true",
"user.attribute": "foo",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "groups",
"jsonType.label": "String"
}
},
{
"name": "upn",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "username",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "upn",
"jsonType.label": "String"
}
}
]
},
{
"name": "roles",
"description": "OpenID Connect scope for add user roles to the access token",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "false",
"consent.screen.text": "${rolesScopeConsentText}",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "audience resolve",
"protocol": "openid-connect",
"protocolMapper": "oidc-audience-resolve-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"access.token.claim": "true"
}
},
{
"name": "realm roles",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-realm-role-mapper",
"consentRequired": false,
"config": {
"user.attribute": "foo",
"introspection.token.claim": "true",
"access.token.claim": "true",
"claim.name": "realm_access.roles",
"jsonType.label": "String",
"multivalued": "true"
}
},
{
"name": "client roles",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-client-role-mapper",
"consentRequired": false,
"config": {
"user.attribute": "foo",
"introspection.token.claim": "true",
"access.token.claim": "true",
"claim.name": "resource_access.${client_id}.roles",
"jsonType.label": "String",
"multivalued": "true"
}
}
]
},
{
"name": "role_list",
"description": "SAML role list",
"protocol": "saml",
"attributes": {
"consent.screen.text": "${samlRoleListScopeConsentText}",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "role list",
"protocol": "saml",
"protocolMapper": "saml-role-list-mapper",
"consentRequired": false,
"config": {
"single": "false",
"attribute.nameformat": "Basic",
"attribute.name": "Role"
}
}
]
},
{
"name": "email",
"description": "OpenID Connect built-in scope: email",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"consent.screen.text": "${emailScopeConsentText}",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "email",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "email",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "email",
"jsonType.label": "String"
}
},
{
"name": "email verified",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-property-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "emailVerified",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "email_verified",
"jsonType.label": "boolean"
}
}
]
},
{
"name": "web-origins",
"description": "OpenID Connect scope for add allowed web origins to the access token",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "false",
"consent.screen.text": "",
"display.on.consent.screen": "false"
},
"protocolMappers": [
{
"name": "allowed web origins",
"protocol": "openid-connect",
"protocolMapper": "oidc-allowed-origins-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"access.token.claim": "true"
}
}
]
},
{
"name": "address",
"description": "OpenID Connect built-in scope: address",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"consent.screen.text": "${addressScopeConsentText}",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "address",
"protocol": "openid-connect",
"protocolMapper": "oidc-address-mapper",
"consentRequired": false,
"config": {
"user.attribute.formatted": "formatted",
"user.attribute.country": "country",
"introspection.token.claim": "true",
"user.attribute.postal_code": "postal_code",
"userinfo.token.claim": "true",
"user.attribute.street": "street",
"id.token.claim": "true",
"user.attribute.region": "region",
"access.token.claim": "true",
"user.attribute.locality": "locality"
}
}
]
},
{
"name": "phone",
"description": "OpenID Connect built-in scope: phone",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"consent.screen.text": "${phoneScopeConsentText}",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "phone number",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "phoneNumber",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "phone_number",
"jsonType.label": "String"
}
},
{
"name": "phone number verified",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-attribute-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"userinfo.token.claim": "true",
"user.attribute": "phoneNumberVerified",
"id.token.claim": "true",
"access.token.claim": "true",
"claim.name": "phone_number_verified",
"jsonType.label": "boolean"
}
}
]
},
{
"name": "offline_access",
"description": "OpenID Connect built-in scope: offline_access",
"protocol": "openid-connect",
"attributes": {
"consent.screen.text": "${offlineAccessScopeConsentText}",
"display.on.consent.screen": "true"
}
},
{
"name": "acr",
"description": "OpenID Connect scope for add acr (authentication context class reference) to the token",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "false",
"display.on.consent.screen": "false"
},
"protocolMappers": [
{
"name": "acr loa level",
"protocol": "openid-connect",
"protocolMapper": "oidc-acr-mapper",
"consentRequired": false,
"config": {
"id.token.claim": "true",
"introspection.token.claim": "true",
"access.token.claim": "true"
}
}
]
},
{
"name": "basic",
"description": "OpenID Connect scope for add all basic claims to the token",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "false",
"display.on.consent.screen": "false"
},
"protocolMappers": [
{
"name": "auth_time",
"protocol": "openid-connect",
"protocolMapper": "oidc-usersessionmodel-note-mapper",
"consentRequired": false,
"config": {
"user.session.note": "AUTH_TIME",
"id.token.claim": "true",
"introspection.token.claim": "true",
"access.token.claim": "true",
"claim.name": "auth_time",
"jsonType.label": "long"
}
},
{
"name": "sub",
"protocol": "openid-connect",
"protocolMapper": "oidc-sub-mapper",
"consentRequired": false,
"config": {
"introspection.token.claim": "true",
"access.token.claim": "true"
}
}
]
},
{
"name": "organization",
"description": "Additional claims about the organization a subject belongs to",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"consent.screen.text": "${organizationScopeConsentText}",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "organization",
"protocol": "openid-connect",
"protocolMapper": "oidc-organization-membership-mapper",
"consentRequired": false,
"config": {
"id.token.claim": "true",
"introspection.token.claim": "true",
"access.token.claim": "true"
}
}
]
}
],
"roles": {
"realm": [
{
"name": "sudo",
"description": "",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "sysAdministration",
"description": "Системное администрирование",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "runReports",
"description": "Формирование отчетов",
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "viewHistory",
"description": "Просмотр истории",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "rootNodeAccess",
"description": "Просмотр всех справочников",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "editExpirables",
"description": "Управление историчностью данных",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "readSecurity",
"description": "Чтение безопасности",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "manageApplications",
"description": "Администрирование журнала заявок",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "exportPatterns",
"description": "Экспорт позиций и шаблонов",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "editData",
"description": "Редактирование данных",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "editDataWithoutApplications",
"description": "Редактирование данных без заявок",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "readUserJournal",
"description": "Чтение журнала работы пользователей",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "editMetadata",
"description": "Редактирование свойств, формул и единиц измерения",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "manageSecurity",
"description": "Управление безопасностью",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "similaritySearch",
"description": "Поиск дубликатов",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "read",
"description": "Чтение данных",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "viceSetup",
"description": "Настройка своих заместителей",
"composite": false,
"clientRole": false,
"attributes": {
"roleType": [
"systemRole"
]
}
},
{
"name": "Все пользователи",
"description": "Группа, в которую входят все пользователи. Наименование и список ролей редактированию не подлежат!",
"composite": true,
"composites": {
"realm": [
"read"
]
},
"clientRole": false,
"attributes": {
"roleType": [
"userRole"
]
}
},
{
"name": "scadmins",
"description": "Базовая роль scadmin",
"composite": true,
"composites": {
"realm": [
"editData",
"similaritySearch",
"viceSetup",
"viewHistory",
"read",
"readSecurity",
"readUserJournal",
"runReports",
"manageApplications",
"editDataWithoutApplications",
"editMetadata",
"manageSecurity",
"exportPatterns",
"editExpirables",
"rootNodeAccess",
"sysAdministration",
"sudo",
"viceSetup"
]
},
"clientRole": false,
"attributes": {
"roleType": [
"userRole"
]
}
}
]
},
"scopeMappings": [
{
"client": "sdi-core-client",
"roles": [
"similaritySearch",
"editData",
"viewHistory",
"viceSetup",
"read",
"readSecurity",
"readUserJournal",
"runReports",
"manageApplications",
"editDataWithoutApplications",
"editMetadata",
"manageSecurity",
"exportPatterns",
"editExpirables",
"offline_access",
"rootNodeAccess",
"sysAdministration",
"sudo",
"uma_authorization",
"viceSetup"
]
}
],
"groups": [
{
"name": "Локальные",
"path": "/Локальные",
"attributes": {
"RWMode": [
"true"
],
"attrName": [
"groupName"
],
"attrDescription": [
"groupDesc"
]
},
"realmRoles": [],
"clientRoles": {}
},
{
"name": "LDAP",
"path": "/LDAP",
"attributes": {
"RWMode": [
"false"
],
"attrName": [
"groupName"
],
"attrDescription": [
"groupDesc"
]
},
"realmRoles": [],
"clientRoles": {}
}
]
}'
echo "===> Realm ${semanticRealm} created"
# Create user scadmin
curl -X POST "${keycloakURI}/admin/realms/${semanticRealm}/users" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" \
-d '{
"username": "scadmin",
"enabled": true,
"email": "scadmin@example.com",
"firstName": "SC",
"lastName": "Admin",
"credentials": [
{
"type": "password",
"value": "scpassword",
"temporary": false
}
]
}'
echo "===> User scadmin created"
# Get scadmin user id
userId=$(curl --silent -X GET "${keycloakURI}/admin/realms/${semanticRealm}/users?username=scadmin" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" | jq -r .[0].id)
# Get service-account id
serviceId=$(curl --silent -X GET "${keycloakURI}/admin/realms/${semanticRealm}/users?username=service-account-${backClient}" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" | jq -r .[0].id)
# Get role id "Все пользователи"
vsePolzovateliId=$(curl --silent -X GET "${keycloakURI}/admin/realms/${semanticRealm}/roles/%D0%92%D1%81%D0%B5%20%D0%BF%D0%BE%D0%BB%D1%8C%D0%B7%D0%BE%D0%B2%D0%B0%D1%82%D0%B5%D0%BB%D0%B8" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" | jq -r .id)
# Get role id "scadmins"
scadminsId=$(curl --silent -X GET "${keycloakURI}/admin/realms/${semanticRealm}/roles/scadmins" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" | jq -r .id)
# Map default realm roles with role "Все пользователи"
echo "===> Assign role Все пользователи to default-roles-${semanticRealm}"
curl -X POST "${keycloakURI}/admin/realms/${semanticRealm}/roles/default-roles-${semanticRealm}/composites" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" \
-d '[
{
"id": "'"${vsePolzovateliId}"'",
"name": "Все пользователи"
}
]'
# Convert string to array
IFS=',' read -r -a arraySystemRoles <<< "$systemRoles"
# Map user service-account with systemRoles
for roleName in "${arraySystemRoles[@]}"; do
echo "===> Assign role ${roleName} to service-account-${backClient}"
roleId=$(curl --silent -X GET "${keycloakURI}/admin/realms/${semanticRealm}/roles/${roleName}" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" | jq -r .id)
curl -X POST "${keycloakURI}/admin/realms/${semanticRealm}/users/${serviceId}/role-mappings/realm" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" \
-d '[
{
"id": "'"${roleId}"'",
"name": "'"${roleName}"'"
}
]'
done
# Map user service-account-sdi-core-client with role "Все пользователи"
echo "===> Assign role Все пользователи to service-account-${backClient}"
curl -X POST "${keycloakURI}/admin/realms/${semanticRealm}/users/${serviceId}/role-mappings/realm" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" \
-d '[
{
"id": "'"${vsePolzovateliId}"'",
"name": "Все пользователи"
}
]'
# Map user scadmin with role "scadmins"
echo "===> Assign role scadmins to scadmin"
curl -X POST "${keycloakURI}/admin/realms/${semanticRealm}/users/${userId}/role-mappings/realm" \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" \
-d '[
{
"id": "'"${scadminsId}"'",
"name": "scadmins"
}
]'
./semantic-realm.sh
перейти в web консоль администрирования, в левом верхнем углу в выпадающем списке выбрать нужный реалм,
перейти в Clients -> <backClient> -> Service account roles -> service-account-<backClient> -> Role mapping -> Assign role -> найти роль realm-admin и навесить её на пользователя,
перейти в Users, найти scadmin -> Role mapping -> Assign role -> найти роль realm-admin и навесить её на пользователя
перейти в Realm roles, найти manageSecurity, в выпадающем списке справа нажать Add associated roles -> найти роль realm-admin и навесить её на роль
перейти в Realm roles, найти readSecurity, в выпадающем списке справа нажать Add associated roles -> найти роли view-realm, view-users и навесить их на роль
Нажать "Add Ldap providers" и заполнить основные поля. Поля с заданным значением чувствительны к его изменению, поэтому, для нас является константой.
Connection URL
Bind DN
Bind credentials
Edit mode = UNSYNCED
Users DN
Username LDAP attribute
RDN LDAP attribute
User LDAP filter
Search scope = Subtree
Import users
Sync Registrations
Batch size
Periodic full sync
Periodic changed users sync
Allow Kerberos authentication
Kerberos realm
Server principal
Key tab
После создания провайдера можно в выпадающем списке "Action" нажать на "Sync all users", тем самым, проверив корректность настройки взаимодействия с LDAP, пользователи должны успешно импортироваться и появиться в списке "Users".
Name = m2-event-listener-ldap-mapper
Mapper type = m2-event-listener-ldap-mapper
Name = groups-import
Mapper type = group-ldap-mapper
LDAP Groups DN
Group Name LDAP Attribute = cn
Group Object Classes = group
Preserve Group Inheritance = On
Ignore Missing Groups = On
Membership LDAP Attribute = member
Membership Attribute Type = DN
Membership User LDAP Attribute = cn
LDAP Filter
Mode = READ_ONLY
Member-Of LDAP Attribute
Drop non-existing groups during sync = On
Необходимо развернуть базу Semantic MDM или создать пустую
На хосте создать и перейти в директорию /opt/semantic
mkdir -p /opt/semantic && cd $_
services:
opensearch:
image: nexus.sdi-solution.ru/infra/opensearch:1.3.2
container_name: opensearch
hostname: opensearch
restart: always
networks:
- semantic-network
ports:
- 9200:9200
environment:
- node.name=opensearch-node1
- cluster.name=opensearch-cluster
- "OPENSEARCH_JAVA_OPTS=-Xms1G -Xmx1G"
- http.port=9200
- discovery.type=single-node
- bootstrap.memory_lock=true
- plugins.security.ssl.http.enabled=false
- indices.query.bool.max_clause_count=100000
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /etc/localtime:/etc/localtime:ro
- opensearch-data:/usr/share/opensearch/data
wildfly:
  image: nexus.sdi-solution.ru/semantic/wildfly:devel
  container_name: wildfly
  restart: always
  networks:
    - semantic-network
  ports:
    # Port mappings quoted per Compose best practice so YAML never
    # misinterprets "a:b" scalars.
    - "8080:8080"
    - "8443:8443"
    - "8990:9990"
    - "8787:8787"
  environment:
    - VERSION=25.1.0
    - DEBUG=enable
    - MAIN_SERVICE_ROUTE=
    - HOST_NAME=
    # === database === #
    # NOTE: list-form environment entries are NOT shell-parsed by Compose;
    # quotes inside the value become literal characters of the variable.
    # Values below are therefore written without embedded quotes.
    - DATABASE_VENDOR=POSTGRESQL
    - DATABASE_DRIVER=postgres
    - DATABASE_CONN=
    - DATABASE_USER=
    - DATABASE_PASS=
    # === opensearch === #
    - OPENSEARCH_URI=
    - OPENSEARCH_USER=admin
    - OPENSEARCH_PASS=admin
    # === opensearch_uj === #
    - USER_JOURNAL=enable
    - OPENSEARCH_UJ_FLUENT_URI=
    - OPENSEARCH_UJ_FLUENT_TAG=<project_name>_user_journal
    - OPENSEARCH_UJ_URI=
    - OPENSEARCH_UJ_USER=admin
    - OPENSEARCH_UJ_PASS=admin
    - OPENSEARCH_UJ_REPLICAS=0
    # === keycloak === #
    - PROVIDER_URL=
    - UI_CLIENT=
    - CLIENT=
    - CLIENT_SECRET=
    - PUBLIC_CLIENT=
    - KC_ADMIN_URL=
    - KC_ADMIN_REALM=
    # === metrics === #
    - OPENTELEMETRY=
    - OTEL_URI=
    - OTEL_SERVICE_NAME=wildfly_<project_name>
    # === logs === #
    - FLUENT_LOGGER=enable
    - FLUENT_LOGGER_HOST=10.100.0.12
    - FLUENT_LOGGER_PORT=24225
    - FLUENT_LOGGER_TAG=test-wildfly
    # === trusted domains === #
    - SECURITY_DOMAIN=m2.ear,m2-data-quality-plugin.war,konturfocus.war,interoperable-api.war,TechExpert.war
    # === wsdl === #
    - WSDL_HOST=
    - WSDL_PORT=8080
    - WSDL_SECURE_PORT=8443
    - WSDL_URI_SCHEME=http
    # === mail === #
    - MAIL_HOST=
    - MAIL_PORT=
    - MAIL_FROM=
    - MAIL_USER=
    - MAIL_PASS=
    # === cores === #
    - HOST_CORE=
    # === hazelcast === #
    - HZCST_NODE_NAME=
    - HZCST_NODE_PORT=5701
    # === rabbit === #
    - MAIN_HOST=
    - MAIN_USER=
    - MAIN_PASS=
    # === plugins === #
    - DEPLOY_BLOB_CONTENT_STORAGE=enable
    - DEPLOY_CONTENT_STORAGE_INTF=enable
    - DEPLOY_MINIO_CONTENT_STORAGE=disable
    # JasperServer
    - JASPER_URI=
    - JASPER_USER=
    - JASPER_PASS=
    # ML
    - ML_MERGER=
    - ML_SOCKET=
    # === ssl === #
    - KEYSTORE_PASS=null
    - KEY_PASS=null
    - REWRITE_HTTP=null
    # === java options === #
    # FIX: the single quotes previously wrapped around the -Djdk.serialFilter
    # value would be passed literally into the JVM property (no shell quote
    # removal happens for Compose env values), breaking the filter. Semicolons
    # need no quoting inside an env-var value.
    - JAVA_OPTS=-Xms1G -Xmx1G -XX:MetaspaceSize=1G -XX:MaxMetaspaceSize=1G -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true -Dsun.net.client.defaultReadTimeout=60000 -Dsun.net.client.defaultConnectTimeout=10000 -Djdk.serialFilter=maxbytes=20971520;maxdepth=128;maxarray=700000;maxrefs=700000
  volumes:
    - /etc/localtime:/etc/localtime:ro
doc:
image: nexus.sdi-solution.ru/semantic/doc:dev
container_name: documentation
networks:
- semantic-network
ports:
- 80:80
gateway:
image: nexus.sdi-solution.ru/semantic/gateway:v1.0
container_name: gateway
networks:
- semantic-network
ports:
- 8281:8281
- 8781:8787
environment:
listen_port: 8281
downstream: http://wildfly:8080
client_id:
client_secret:
public_client:
token_endpoint: KEYCLOAK_URL/protocol/openid-connect/token
cache_max_size: 256
JAVA_OPTS_APPEND: -agentlib:jdwp=transport=dt_socket,address=gateway:8787,server=y,suspend=n
command: ['java', '-jar', '/opt/basic.auth.gateway-1.0.0-SNAPSHOT.jar']
jasperserver:
image: nexus.sdi-solution.ru/semantic/jasperserver:sdi
container_name: jasperserver
hostname: jasperserver
restart: always
networks:
- semantic-network
ports:
- 7080:8080
- 7443:8443
environment:
- DB_TYPE=postgres
- DB_HOST=jasperserver-db
- DB_PORT=
- DB_USER=
- DB_PASSWORD=
- WF_DOMAIN_NAME=
- IP_WILDFLY=
- BUILDOMATIC_MODE=script
volumes:
- /etc/localtime:/etc/localtime:ro
jasperserver-db:
image: postgres:15
container_name: jasperserver-db
hostname: jasperserver-db
restart: always
networks:
- semantic-network
environment:
- POSTGRES_PASSWORD=
- POSTGRES_USER=
- POSTGRES_DB=jasperserver
volumes:
- /etc/localtime:/etc/localtime:ro
- jasperserver-data:/var/lib/postgresql/data
rabbitmq:
image: rabbitmq:3.12.13-management
container_name: rabbitmq
hostname: rabbitmq
restart: always
networks:
- semantic-network
ports:
- 15672:15672
- 5672:5672
environment:
- RABBITMQ_DEFAULT_USER=m2
- RABBITMQ_DEFAULT_PASS=m2
- RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=-rabbit log_levels [{connection,error},{default,error}] disk_free_limit 2147483648
volumes:
- /etc/localtime:/etc/localtime:ro
- rabbitmq-data:/var/lib/rabbitmq
fluent-bit:
image: fluent/fluent-bit:4.0
container_name: fluent-bit-telemetery
restart: always
ports:
- 24223:24223
- 24224:24224
- 24225:24225
networks:
- semantic-network
volumes:
- /etc/localtime:/etc/localtime:ro
- ./config/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
opensearch-uj:
image: nexus.sdi-solution.ru/infra/opensearch:1.3.2
container_name: opensearch-uj
restart: always
ports:
- 10200:9200
environment:
- node.name=opensearch-node1
- cluster.name=opensearch-cluster
- "OPENSEARCH_JAVA_OPTS=-Xms2G -Xmx2G"
- http.port=9200
- discovery.type=single-node
- bootstrap.memory_lock=true
- plugins.security.ssl.http.enabled=false
- indices.query.bool.max_clause_count=100000
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- /etc/localtime:/etc/localtime:ro
- opensearch-uj-data:/usr/share/opensearch/data
networks:
- semantic-network
dashboards:
image: opensearchproject/opensearch-dashboards:1.3.2
container_name: dashboards
ports:
- 5601:5601
volumes:
- ./config/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
depends_on:
- opensearch-uj
networks:
- semantic-network
otel-collector:
image: nexus.sdi-solution.ru/infra/opentelemetry:0.123.0
container_name: otel-collector
command: [--config=/etc/otel-collector-config.yml]
volumes:
- /etc/localtime:/etc/localtime:ro
- ./config/otel-collector-config.yml:/etc/otel-collector-config.yml
ports:
- 4317:4317 # OTLP gRPC receiver
- 4318:4318 # OTLP http receiver
- 1234:1234 # /metrics endpoint
networks:
- semantic-network
data-prepper:
image: opensearchproject/data-prepper:2
container_name: data-prepper
volumes:
- /etc/localtime:/etc/localtime:ro
- ./config/pipelines.yaml:/usr/share/data-prepper/pipelines/pipelines.yaml
# - ./log_pipeline.yaml:/usr/share/data-prepper/pipelines/log_pipeline.yaml
- ./config/data-prepper-config.yaml:/usr/share/data-prepper/config/data-prepper-config.yaml
ports:
- 2021:2021
- 2022:2022
- 21890:21890
networks:
- semantic-network
# minio:
# image: minio/minio:RELEASE.2025-02-07T23-21-09Z
# hostname: minio
# restart: always
# networks:
# - semantic-network
# ports:
# - 9000:9000
# - 9001:9001
# environment:
# MINIO_ROOT_USER:
# MINIO_ROOT_PASSWORD:
# command: server --console-address ":9001" /data
# volumes:
# - minio-data:/data
volumes:
opensearch-data:
name: opensearch-data
jasperserver-data:
name: jasperserver-data
rabbitmq-data:
name: rabbitmq-data
opensearch-uj-data:
name: opensearch-uj-data
# minio-data:
# name: minio-data
networks:
semantic-network:
driver: bridge
opensearch
- node.name: название ноды
- cluster.name: название кластера
- OPENSEARCH_JAVA_OPTS: отвечает за размер потребляемой оперативной памяти, где
Xms - минимальный объём, выделяемый на старте,
Xmx - максимальный объём, из-за переменной, отвечающей за подкачку памяти bootstrap.memory_lock=true, Xms должен быть всегда равен Xmx
- plugins.security.ssl.http.enabled: отвечает за параметры безопасности (ДОРАБОТАТЬ)
Остальные переменные по умолчанию
wildfly
- DEBUG: enable | disable - включение|выключение режима отладки
- MAIN_SERVICE_ROUTE: http:// | https:// - URL адрес, по которому будет осуществляться подключение к SemanticApplication
- HOST_NAME: http:// | https:// - URL адрес, по которому будет осуществляться подключение к SemanticApplication
- DATABASE_CONN: jdbc:postgresql://<DB_ADDRESS>:<DB_PORT>/<DB_NAME>?currentSchema=<SCHEMA> - подключение к БД SemanticApplication
- DATABASE_USER: пользователь БД с правами на базу m2
- DATABASE_PASS: пароль пользователя БД с правами на базу m2
- OPENSEARCH_URI: http:// | https:// - URL адрес SemanticSearch
- OPENSEARCH_USER: пользователь SemanticSearch
- OPENSEARCH_PASS: пароль пользователя SemanticSearch
- USER_JOURNAL: enable/disable - включить выключить отправку журнала работы пользователей во внешний источник
- OPENSEARCH_UJ_FLUENT_URI: http|https://<ip>:24223, указать эндпоинт сборщика, предполагается, что будет расположен на одном хосте с wildfly и будет иметь порт по умолчанию 24223
- OPENSEARCH_UJ_FLUENT_TAG: указать тег, при помощи которого будут собираться и выбирать нужный пайплайн логи, <project_name>_user_journal
- OPENSEARCH_UJ_URI: http|https://<ip>:<port> - указать эндпоинт Opensearch, где будет храниться журнал работы пользователей
- OPENSEARCH_UJ_USER: пользователь SemanticSearch
- OPENSEARCH_UJ_PASS: пароль пользователя SemanticSearch
- OPENSEARCH_UJ_REPLICAS: указать количество реплик индекса, если не требуется, указать 0
- PROVIDER_URL: http://<ip>:<port>/realms/<realm_name>/ | https://<url>/realms/<realm_name>/ - URL адрес, по которому будет осуществляться подключение к SemanticIAM
- UI_CLIENT: имя front клиента
- CLIENT: имя back клиента
- CLIENT_SECRET: секрет back клиента
- PUBLIC_CLIENT: указать false
- KC_ADMIN_URL: указать адрес Keycloak, если есть кастомный локейшен, указать и его
- KC_ADMIN_REALM: указать название, совпадает с именем созданного реалма для SemanticMDM
- OPENTELEMETRY: enable/disable - включение/выключение отправки метрик wildfly
- OTEL_URI: http|https://<ip>:<port>/v1/metrics - указать эндпоинт агента Opentelemetry
- OTEL_SERVICE_NAME: указать имя сервиса, для удобного сбора метрик, имеет вид wildfly_<project_name>
- FLUENT_LOGGER: enable/disable - включить/выключить отправку логов сервиса во внешний источник
- FLUENT_LOGGER_HOST: IP адрес сборщика, предполагается, что будет расположен на одном хосте с wildfly
- FLUENT_LOGGER_PORT: порт сборщика, в текущей конфигурации по умолчанию 24225
- FLUENT_LOGGER_TAG: указать тег, при помощи которого будут собираться и выбирать нужный пайплайн логи, <project_name>
- WSDL_HOST: FQDN SemanticApplication
- MAIL_HOST: FQDN сервера почты
- MAIL_PORT: порт сервера почты
- MAIL_FROM: пользователь, от чьего имени будет осуществляться рассылка
- MAIL_USER: пользователь на сервере почты, который имеет право осуществлять рассылку
- MAIL_PASS: пароль пользователя на сервере почты, который имеет право осуществлять рассылку
- HOST_CORE: указать количество ядер, доступное на сервере
- HZCST_NODE_NAME: имя ноды, обычно соответствует названию проекта
- MAIN_HOST: IP адрес сервера с SemanticMQ
- MAIN_USER: пользователь SemanticMQ
- MAIN_PASS: пароль пользователя SemanticMQ
- DEPLOY_BLOB_CONTENT_STORAGE: enable | disable - выбор режима хранения блобов, не может быть включен одновременно с DEPLOY_MINIO_CONTENT_STORAGE
- DEPLOY_MINIO_CONTENT_STORAGE: enable | disable - выбор режима хранения блобов, не может быть включен одновременно с DEPLOY_BLOB_CONTENT_STORAGE,
при этом нужно добавить переменные подключения к хранилищу S3
- MINIO_URI: http:// | https:// - URL адрес, по которому будет осуществляться подключение к SemanticBlobs, если DEPLOY_MINIO_CONTENT_STORAGE=enable
- MINIO_USER: пользователь SemanticBlobs, если DEPLOY_MINIO_CONTENT_STORAGE=enable
- MINIO_PASS: пароль пользователя SemanticBlobs, если DEPLOY_MINIO_CONTENT_STORAGE=enable
- JASPER_URI: http:// | https:// - URL адрес, по которому будет осуществляться подключение к SemanticReports
- JASPER_USER: пользователь SemanticReports
- JASPER_PASS: пароль пользователя SemanticReports
- ML_MERGER: http:// | https:// - адрес SemanticML (если есть)
- ML_SOCKET: http:// | https:// - адрес SemanticML (если есть)
- JAVA_OPTS:
Xms - минимальный объём памяти всей heap области
Xmx - максимальный объём памяти всей heap области
XX:MetaspaceSize - минимальный объём памяти для metaspace области
XX:MaxMetaspaceSize - максимальный объём памяти для metaspace области
gateway
client_id: имя back клиента
client_secret: секрет back клиента
public_client: указать false
token_endpoint: указать <keycloak_url>/protocol/openid-connect/token
jasperserver
- DB_HOST: подключение к БД в формате ip или FQDN
- DB_PORT: порт подключения к БД
- DB_USER: пользователь БД с правами на базу jasperserver
- DB_PASSWORD: пароль пользователя
- WF_DOMAIN_NAME: FQDN сервера с SemanticApplication
- IP_WILDFLY: ip сервера с SemanticApplication
jasperserver-db
- POSTGRES_PASSWORD: пароль пользователя БД
- POSTGRES_USER: пользователь БД
minio, если DEPLOY_MINIO_CONTENT_STORAGE=enable раскоментировать секцию с minio и volumes: minio-data
- MINIO_ROOT_USER: пользователь SemanticStorage
- MINIO_ROOT_PASSWORD: пароль пользователя SemanticStorage
Создать директорию config
mkdir -p /opt/semantic/config && cd $_
и создать там конфигурационные файлы со следующим содержимым
fluent-bit.conf
# Fluent Bit pipeline for the Semantic stack:
# three inputs (HTTP :24223, forward :24224, TCP :24225) and two outputs
# (OpenTelemetry collector for service logs, OpenSearch for the user journal).
[SERVICE]
Flush 1
Daemon off
HTTP_Server on
Log_Level info
# HTTP input: receives the user-journal stream (OPENSEARCH_UJ_FLUENT_URI points here)
[INPUT]
Name http
Listen 0.0.0.0
Port 24223
# Forward input (fluentd protocol)
[INPUT]
Name forward
Listen 0.0.0.0
Port 24224
Buffer_Chunk_Size 32M
Buffer_Max_Size 64M
# TCP input: wildfly service logs in JSON, tagged with the project name
# (FLUENT_LOGGER_PORT defaults to 24225)
[INPUT]
Name tcp
Listen 0.0.0.0
Port 24225
Chunk_Size 32M
Buffer_Size 64M
Format json
Tag <project_name>
# Normalize wildfly log records before export: drop MDC/NDC fields and
# rename level/message to the OTLP-style keys the collector's transform expects
[FILTER]
Name modify
Match <project_name>
Remove mdc
Remove ndc
Rename level severityText
Rename message body
# Service logs -> OpenTelemetry collector (OTLP/HTTP, plain text)
[OUTPUT]
Name opentelemetry
Match <project_name>
Host <ip_opentelemetry>
Port 4318
Logs_uri /v1/logs
Log_Response_Payload True
Tls Off
# User-journal records -> OpenSearch index "user_journal"
[OUTPUT]
Name opensearch
Match <project_name>_user_journal
Host <ip_opensearch_user_journal>
Port <port_opensearch_user_journal>
HTTP_User admin
HTTP_Passwd admin
Index user_journal
Generate_ID On
Suppress_Type_Name On
Logstash_Format Off
и заполнить актуальными данными
otel-collector-config.yml
# OpenTelemetry collector: receives OTLP metrics/logs from wildfly and
# fluent-bit, exposes metrics for Prometheus scraping on :1234 and forwards
# logs to Data Prepper (OTLP gRPC on :21890).
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
processors:
memory_limiter:
check_interval: 1s
limit_percentage: 75
spike_limit_percentage: 15
batch:
send_batch_size: 10000
timeout: 10s
# Lift wildfly's JSON log fields out of the body into OTLP attributes
transform:
log_statements:
- context: log
statements:
- set(log.attributes["threadId"], log.body["threadId"])
- set(log.attributes["threadName"], log.body["threadName"])
- set(log.attributes["loggerClass"], log.body["loggerClassName"])
- set(log.attributes["loggerName"], log.body["loggerName"])
- set(log.attributes["hostName"], log.body["hostName"])
- set(log.attributes["processId"], log.body["processId"])
- set(log.attributes["processName"], log.body["processName"])
- set(log.attributes["sequence"], log.body["sequence"])
- set(log.attributes["serviceName"], log.body["serviceName"])
- set(log.severity_text, log.body["severityText"])
- set(log.body, log.body["body"])
exporters:
# NOTE(review): the debug exporter is declared but not referenced by any
# pipeline below — kept for ad-hoc troubleshooting; confirm it is intentional.
debug:
verbosity: basic
prometheus:
endpoint: 0.0.0.0:1234
# Logs are forwarded to Data Prepper (port 21890 in pipelines.yaml)
otlp/logs:
endpoint: http://<host_ip>:21890
tls:
insecure: true
service:
pipelines:
# NOTE(review): the metrics pipeline has no processors section, so
# memory_limiter/batch apply only to logs — confirm this is intended.
metrics:
receivers: [otlp]
exporters: [prometheus]
logs:
receivers: [otlp]
exporters: [otlp/logs]
processors: [memory_limiter, batch, transform]
и заполнить эндпоинт для экспорта логов
data-prepper-config.yaml
ssl: false
pipelines.yaml
# Data Prepper pipeline: receives OTLP logs from the otel-collector
# (otlp/logs exporter, port 21890) and writes them into daily
# OpenSearch indices logs-<dd.MM.yyyy> in the user-journal cluster.
otel-log-pipeline:
source:
otel_logs_source:
ssl: false
port: 21890
sink:
- opensearch:
hosts: ["http://<ip_opensearch_uj>:<port_opensearch_uj>"]
username: admin
password: admin
# Date pattern creates one index per day; matches the logs-* pattern
# used by the ISM lifecycle policy in logs-policy.sh
index: "logs-%{dd.MM.yyyy}"
и указать актуальные данные для подключения к opensearch, по умолчанию имеют вид, как в файле
opensearch_dashboards.yml
server.host: "0.0.0.0"
opensearch.hosts: ["http://<ip_opensearch_uj>:<port_opensearch_uj>"]
opensearch.username: "admin"
opensearch.password: "admin"
и указать актуальные данные для подключения к opensearch, по умолчанию имеют вид, как в файле
logs-policy.sh
#!/bin/bash
# Creates an OpenSearch ISM lifecycle policy (hot -> warm -> cold -> delete)
# for the user-journal log indices, plus the logs-* index template that the
# script announces. The ism_template block inside the policy auto-attaches
# the policy to new indices matching logs-*.
OPENSEARCH_HOST="<ip_opensearch_uj>"
OPENSEARCH_PORT="<port_opensearch_uj>"
USERNAME="admin"
PASSWORD="admin"
POLICY_ID="semantic-lifecycle-policy"
TEMPLATE_NAME="logs-template"
echo "Создание ISM политики: $POLICY_ID"
# read -d '' returns non-zero at EOF; harmless here (the script does not use set -e)
read -r -d '' POLICY_JSON << EOF
{
"policy": {
"description": "Policy with hot -> warm -> cold -> delete phases",
"default_state": "hot",
"states": [
{
"name": "hot",
"actions": [],
"transitions": [
{
"state_name": "warm",
"conditions": {
"min_index_age": "1d"
}
}
]
},
{
"name": "warm",
"actions": [],
"transitions": [
{
"state_name": "cold",
"conditions": {
"min_index_age": "2d"
}
}
]
},
{
"name": "cold",
"actions": [],
"transitions": [
{
"state_name": "delete",
"conditions": {
"min_index_age": "9d"
}
}
]
},
{
"name": "delete",
"actions": [
{
"delete": {}
}
],
"transitions": []
}
],
"ism_template": {
"index_patterns": ["logs-*"],
"priority": 100
}
}
}
EOF
# Create the ISM policy
curl -u "$USERNAME:$PASSWORD" -X PUT "http://${OPENSEARCH_HOST}:${OPENSEARCH_PORT}/_plugins/_ism/policies/${POLICY_ID}" \
-H "Content-Type: application/json" \
-d "$POLICY_JSON"
echo -e "\n Политика ISM '${POLICY_ID}' создана."
echo -e "\n Создание index template: $TEMPLATE_NAME"
# FIX: the template announced above was previously never created.
# Composable index template for the daily logs-* indices (no replicas —
# matches OPENSEARCH_UJ_REPLICAS=0 used by the stack).
read -r -d '' TEMPLATE_JSON << EOF
{
"index_patterns": ["logs-*"],
"template": {
"settings": {
"index": {
"number_of_replicas": 0
}
}
}
}
EOF
curl -u "$USERNAME:$PASSWORD" -X PUT "http://${OPENSEARCH_HOST}:${OPENSEARCH_PORT}/_index_template/${TEMPLATE_NAME}" \
-H "Content-Type: application/json" \
-d "$TEMPLATE_JSON"
echo -e "\n Всё готово! Новые индексы logs-* будут автоматически использовать ISM политику."
заполнить актуальными данными для подключения к Opensearch для user journal и сделать его исполняемым
chmod +x logs-policy.sh
Необходимо положить бэкап базы на сервер в директорию /tmp, с которого будем запускать Semantic (https://dev.sdi-solution.ru:434/svn/SemanticDistrib/branches/v24.2/jasperserver/db) и выполнить следующие команды
docker compose up -d jasperserver-db
docker compose cp /tmp/jasperserver.backup jasperserver-db:/tmp
docker compose exec -i jasperserver-db pg_restore -d jasperserver -U <jasperserver_db_user> /tmp/jasperserver.backup
После можем запускать инстанс
docker compose up -d
После того, как запустится сервис opensearch-uj выполнить скрипт для создания политики жизненного цикла для индекса
/opt/semantic/config/logs-policy.sh
server {
server_name <FQDN>;
return 301 https://<FQDN>$request_uri;
}
upstream <stream_name> {
least_conn;
server <semantic_mq_ip>:<semantic_mq_port> weight=10 max_fails=3 fail_timeout=30s;
}
# Если активна переменная KC_HTTP_RELATIVE_PATH в keycloak
#upstream keycloak {
# ip_hash;
# server <keycloak_ip>:<keycloak_port>;
#}
# Main HTTPS server: terminates TLS and proxies to the Semantic application,
# the RabbitMQ management UI, JasperServer and the documentation guides.
server {
    listen 443 ssl;
    server_name <FQDN>;

    # Large uploads and long-running requests are expected (report generation).
    client_max_body_size 900M;
    proxy_read_timeout 1800;
    proxy_connect_timeout 1800;
    proxy_send_timeout 1800;
    send_timeout 1800;

    # FIX: the trailing ';' after ssl_certificate was missing, which makes
    # nginx fail to parse the configuration.
    ssl_certificate <path_to_certificate>;
    ssl_certificate_key <path_to_key>;
    ssl_prefer_server_ciphers on;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES256-SHA384;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    add_header Strict-Transport-Security "max-age=31536000";

    # Semantic application; Upgrade/Connection headers enable WebSockets.
    location / {
        proxy_pass http://<semantic_application_ip>:<semantic_application_port>$request_uri;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    # RabbitMQ management UI: strip the /rabbitmq prefix before proxying.
    location /rabbitmq {
        rewrite ^/rabbitmq$ /rabbitmq/ permanent;
        rewrite ^/rabbitmq/(.*)$ /$1 break;
        proxy_pass http://<stream_name>;
        proxy_buffering off;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # JasperServer reporting engine (HTTPS backend, shorter timeouts).
    location /jasperserver {
        proxy_pass https://<jasper_ip>:<jasper_port>/jasperserver;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_pass_header Cookie;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_cache_bypass $http_upgrade;
        proxy_buffering off;
        proxy_ignore_client_abort off;
        proxy_redirect off;
        keepalive_timeout 160;
        proxy_connect_timeout 32;
        proxy_send_timeout 32;
        proxy_read_timeout 32;
    }

    # Documentation guides (root and per-audience sections).
    location /guides/ {
        proxy_pass http://<semantic_application_ip>/;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /guides/user/ {
        proxy_pass http://<semantic_application_ip>/guides/user/;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /guides/developer/ {
        proxy_pass http://<semantic_application_ip>/guides/developer/;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
    location /guides/administrator/ {
        proxy_pass http://<semantic_application_ip>/guides/administrator/;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    # Uncomment if KC_HTTP_RELATIVE_PATH is enabled in Keycloak.
    # location /iam {
    #     proxy_set_header X-Real-IP $remote_addr;
    #     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    #     proxy_set_header X-Forwarded-Proto $scheme;
    #     proxy_set_header Host $http_host;
    #     proxy_cookie_path ~^/(.+)$ "/$1; SameSite=none";
    #     proxy_connect_timeout 300;
    #     proxy_http_version 1.1;
    #     proxy_pass http://keycloak$request_uri;
    # }
}
# Keycloak (IAM) upstream; ip_hash keeps a client pinned to one backend.
upstream keycloak {
    ip_hash;
    server <semantic_iam_ip>:<semantic_iam_port>;
}

# Dedicated HTTPS virtual host for Keycloak.
server {
    ignore_invalid_headers off;
    client_max_body_size 0;
    proxy_buffer_size 128k;
    proxy_buffers 4 256k;
    proxy_busy_buffers_size 256k;
    listen 443 ssl;
    server_name keycloak.sdi-solution.ru;

    # FIX: the trailing ';' after ssl_certificate was missing, which makes
    # nginx fail to parse the configuration.
    ssl_certificate <path_to_certificate>;
    ssl_certificate_key <path_to_key>;

    location / {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;
        # Rewrite cookie paths so cross-site flows keep working.
        proxy_cookie_path ~^/(.+)$ "/$1; SameSite=none";
        proxy_connect_timeout 300;
        proxy_http_version 1.1;
        proxy_pass http://keycloak$request_uri;
    }
}
Для установки потребуется программное обеспечение для автоматизации развёртывания и управления приложениями в среде контейнеризации - Docker. Установить, для вашей платформы, можно по инструкции https://docs.docker.com/engine/install/ или любым другим способом, также, необходимым пакетом будет являться docker-compose.
sudo swapoff -a
sudo vi /etc/sysctl.conf
vm.max_map_count=262144
sudo sysctl -p
mkdir -p /opt/semanticsearch && cd $_
# Single-node OpenSearch for the Semantic stack.
services:
  opensearch:
    image: nexus.sdi-solution.ru/infra/opensearch:1.3.2
    hostname: opensearch
    networks:
      - semantic-network
    environment:
      - node.name=<node_name>
      - cluster.name=<cluster_name>
      # Set min and max JVM heap sizes to at least 50% of system RAM
      - "OPENSEARCH_JAVA_OPTS=-Xms2G -Xmx2G"
      - http.port=9200
      - discovery.type=single-node
      # Disable JVM heap memory swapping
      - bootstrap.memory_lock=true
      - plugins.security.ssl.http.enabled=false
    ulimits:
      # Set memlock to unlimited (no soft or hard limit)
      memlock:
        soft: -1
        hard: -1
      # Maximum number of open files for the opensearch user - set to at least 65536
      nofile:
        soft: 65536
        hard: 65536
    ports:
      # FIX: quote "host:container" mappings — unquoted colon-separated
      # integers can be mis-typed by YAML parsers.
      - "9200:9200"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - opensearch-data:/usr/share/opensearch/data
    restart: always

volumes:
  opensearch-data:
    name: opensearch-data

networks:
  semantic-network:
    driver: bridge
- node.name: название ноды
- cluster.name: название кластера
- OPENSEARCH_JAVA_OPTS: отвечает за размер потребляемой оперативной памяти, где Xms - минимальный объём, выделяемый на старте,
Xmx - максимальный объём, из-за переменной, отвечающей за подкачку памяти bootstrap.memory_lock=true,
Xms должен быть всегда равен Xmx
- plugins.security.ssl.http.enabled: отвечает за параметры безопасности (ДОРАБОТАТЬ)
Остальные переменные по умолчанию
sudo docker compose up -d
sudo swapoff -a
sudo vi /etc/sysctl.conf
vm.max_map_count=262144
sudo sysctl -p
mkdir -p /opt/semanticsearch && cd $_
# One node of a multi-host OpenSearch cluster (repeat per host with its own
# node.name / network.publish_host / node.roles).
services:
  opensearch:
    image: opensearchproject/opensearch:1.3.2
    hostname: <node_name>
    environment:
      - node.name=<node_name>
      - cluster.name=<cluster_name>
      - "OPENSEARCH_JAVA_OPTS=-Xms2G -Xmx2G" # Set min and max JVM heap sizes to at least 50% of system RAM
      - network.publish_host=<host_ip>
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping
      - plugins.security.ssl.http.enabled=false
      - plugins.security.disabled=true
      - node.roles=<node_role>
      - discovery.seed_hosts=<ip_nodes>
      - cluster.initial_master_nodes=<master_node_ip>
    ulimits:
      memlock:
        soft: -1 # Set memlock to unlimited (no soft or hard limit)
        hard: -1
      nofile:
        soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536
        hard: 65536
    ports:
      # FIX: quote "host:container" mappings — unquoted colon-separated
      # integers can be mis-typed by YAML parsers.
      - "9200:9200"
      - "9300:9300"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - opensearch-data:/usr/share/opensearch/data

# FIX: the named volume must be declared at the top level, otherwise
# `docker compose up` fails with an "undefined volume" error.
volumes:
  opensearch-data:
- hostname: название ноды
- node.name: название ноды
- cluster.name: название кластера, должно быть у всех нод одинаковое
- OPENSEARCH_JAVA_OPTS: отвечает за размер потребляемой оперативной памяти, где Xms - минимальный объём, выделяемый на старте,
Xmx - максимальный объём, из-за переменной, отвечающей за подкачку памяти bootstrap.memory_lock=true,
Xms должен быть всегда равен Xmx
- plugins.security.ssl.http.enabled: отвечает за параметры безопасности (ДОРАБОТАТЬ)
- network.publish_host: важный параметр, чтобы OpenSearch на разных хостах смогли увидеть свой кластер
- discovery.seed_hosts: ip адреса всех нод в кластере
- node.roles: роль ноды (Master, Data, Search, Coordinating, Ingest, Dynamic)
Остальные параметры по умолчанию
mkdir -p /opt/semanticreports && cd $_
# JasperServer reporting service; DB_* and WF_*/IP_WILDFLY values must be
# filled in before starting (see the parameter descriptions below).
services:
  jasperserver:
    image: nexus.sdi-solution.ru/semantic/jasperserver:sdi
    networks:
      - semantic-network
    environment:
      - DB_TYPE=postgresql
      - DB_HOST=
      - DB_PORT=
      - DB_USER=
      - DB_PASSWORD=
      - WF_DOMAIN_NAME=
      - IP_WILDFLY=
      - BUILDOMATIC_MODE=script
    volumes:
      - /etc/localtime:/etc/localtime:ro
    ports:
      # FIX: quote "host:container" mappings — unquoted colon-separated
      # integers can be mis-typed by YAML parsers.
      - "8080:8080"
      - "8443:8443"
    restart: always

networks:
  semantic-network:
    driver: bridge
- DB_HOST: подключение к БД в формате ip или FQDN
- DB_PORT: порт подключения к БД
- DB_USER: пользователь БД с правами на базу jasperserver
- DB_PASSWORD: пароль пользователя
- WF_DOMAIN_NAME: FQDN сервера с SemanticApplication
- IP_WILDFLY: ip сервера с SemanticApplication
sudo docker compose up -d
mkdir -p /opt/semanticmq && cd $_
Компонент может работать в двух вариантах, с RabbitMQ или Kafka
# RabbitMQ message broker with the management UI enabled.
# NOTE(review): this snippet assumes an enclosing `services:` key; the named
# volume rabbitmq-data and the semantic-network network must also be declared
# at the top level of the compose file — confirm against the full file.
rabbitmq:
  image: rabbitmq:3.12.13-management
  hostname: rabbitmq
  restart: always
  networks:
    - semantic-network
  ports:
    # FIX: quote "host:container" mappings — unquoted colon-separated
    # integers can be mis-typed by YAML parsers.
    - "15672:15672"
    - "5672:5672"
  environment:
    - RABBITMQ_DEFAULT_USER=m2
    - RABBITMQ_DEFAULT_PASS=m2
    - RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=-rabbit log_levels [{connection,error},{default,error}] disk_free_limit 2147483648
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - rabbitmq-data:/var/lib/rabbitmq
- RABBITMQ_DEFAULT_USER: пользователь RabbitMQ
- RABBITMQ_DEFAULT_PASS: пароль пользователя RabbitMQ
sudo docker compose up -d
# Single-node Kafka (KRaft, combined broker+controller) plus kafka-ui.
# All listeners are PLAINTEXT — suitable for dev/test only.
services:
  kafka-0:
    container_name: kafka-0
    hostname: kafka-0
    image: docker.io/bitnami/kafka:3.7
    ports:
      - "10093:10093"
    restart: always
    networks:
      - kafka
    environment:
      # KAFKA CLUSTER
      - KAFKA_CFG_NODE_ID=0
      - KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
      - KAFKA_CFG_PROCESS_ROLES=broker,controller
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093
      - KAFKA_CFG_SUPER_USERS=User:kfadmin
      # LISTENERS
      - KAFKA_CFG_LISTENERS=INTERNAL://:9091,EXTERNAL://:10093,CONTROLLER://:9093
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://:9091,EXTERNAL://<FQDN>:10093 # Replace FQDN to really FQDN or IP
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT
      # BROKER SETTINGS
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
      - KAFKA_INTER_BROKER_USER=kfadmin
      - KAFKA_INTER_BROKER_PASSWORD=000000
      - KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
      # CONTROLLER SETTINGS
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN
      - KAFKA_CONTROLLER_USER=kfadmin
      - KAFKA_CONTROLLER_PASSWORD=000000
      # CLIENT SETTINGS
      - KAFKA_CLIENT_LISTENER_NAME=EXTERNAL
      - KAFKA_CLIENT_USERS=kfadmin,wildfly,other
      - KAFKA_CLIENT_PASSWORDS=000000,111111,222222
      # COMMON SETTINGS
      - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
    volumes:
      - kafka_0_data:/bitnami/kafka
    healthcheck:
      # TCP probe of the internal listener; $$ escapes $ for compose.
      test: "bash -c 'printf \"\" > /dev/tcp/127.0.0.1/9091; exit $$?;'"
      interval: 5s
      timeout: 10s
      retries: 3
      start_period: 30s

  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:v0.7.2
    ports:
      # FIX: quote "host:container" mappings — unquoted colon-separated
      # integers can be mis-typed by YAML parsers.
      - "8099:8080"
    restart: always
    networks:
      - kafka
    environment:
      # CLUSTER SETTINGS
      - KAFKA_CLUSTERS_0_NAME=dev-cluster
      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka-0:9091
      - KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=PLAINTEXT
      - KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN
      - DYNAMIC_CONFIG_ENABLED=true
      # AUTH UI
      - AUTH_TYPE=LOGIN_FORM
      - SPRING_SECURITY_USER_NAME=admin
      - SPRING_SECURITY_USER_PASSWORD=passw0rd
    depends_on:
      - kafka-0
    healthcheck:
      test: wget --no-verbose --tries=1 --spider localhost:8080 || exit 1
      interval: 5s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  kafka_0_data:

networks:
  kafka:
sudo docker compose up -d
На каждом сервере создать директорию /opt/semanticmq
Создать docker-compose.yml со следующим содержимым для 1 сервера
# Kafka broker 0 of the 3-node KRaft cluster (combined broker+controller).
# All listeners use SASL_SSL; the controller quorum spans kafka-0..kafka-2
# on port 10099.
services:
  kafka-0:
    container_name: kafka-0
    hostname: kafka-0
    image: docker.io/bitnami/kafka:3.7
    ports:
      - "10095:10095"
      - "10097:10097"
      - "10099:10099"
    restart: always
    networks:
      - kafka
    environment:
      # KAFKA CLUSTER
      - KAFKA_CFG_NODE_ID=0
      - KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
      - KAFKA_CFG_PROCESS_ROLES=broker,controller
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:10099,1@kafka-1:10099,2@kafka-2:10099
      - KAFKA_CFG_SUPER_USERS=User:kfadmin
      # LISTENERS
      - KAFKA_CFG_LISTENERS=INTERNAL://:10095,EXTERNAL://:10097,CONTROLLER://:10099
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://:10095,EXTERNAL://<FQDN>:10097
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:SASL_SSL,EXTERNAL:SASL_SSL,CONTROLLER:SASL_SSL
      # BROKER SETTINGS
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
      - KAFKA_INTER_BROKER_USER=kfadmin
      - KAFKA_INTER_BROKER_PASSWORD=000000
      - KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
      # CONTROLLER SETTINGS
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN
      - KAFKA_CONTROLLER_USER=kfadmin
      - KAFKA_CONTROLLER_PASSWORD=000000
      # CLIENT SETTINGS
      - KAFKA_CLIENT_LISTENER_NAME=EXTERNAL
      - KAFKA_CLIENT_USERS=kfadmin,wildfly,other
      - KAFKA_CLIENT_PASSWORDS=000000,111111,222222
      # ACL
      - KAFKA_CFG_AUTHORIZER_CLASS_NAME=org.apache.kafka.metadata.authorizer.StandardAuthorizer
      - KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND=false
      # SSL
      # NOTE(review): KAFKA_TLS_TYPE=JKS implies keystore/truststore files,
      # but no certificate volume is mounted here — confirm where the image
      # is expected to find kafka.keystore.jks / kafka.truststore.jks.
      - KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM=
      - KAFKA_CERTIFICATE_PASSWORD=supersecret
      - KAFKA_TLS_TYPE=JKS
      # SASL
      - KAFKA_CFG_SASL_ENABLED_MECHANISMS=PLAIN
      # COMMON SETTINGS
      - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false
    volumes:
      - kafka_0_data:/bitnami/kafka
    healthcheck:
      # TCP probe of the internal listener; $$ escapes $ for compose.
      test: "bash -c 'printf \"\" > /dev/tcp/127.0.0.1/10095; exit $$?;'"
      interval: 5s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  kafka_0_data:

networks:
  kafka:
# Kafka broker 1 of the 3-node KRaft cluster — identical to kafka-0 except
# for the node id, hostname and data volume.
services:
  kafka-1:
    container_name: kafka-1
    hostname: kafka-1
    image: docker.io/bitnami/kafka:3.7
    ports:
      - "10095:10095"
      - "10097:10097"
      - "10099:10099"
    restart: always
    networks:
      - kafka
    environment:
      # KAFKA CLUSTER
      - KAFKA_CFG_NODE_ID=1
      - KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
      - KAFKA_CFG_PROCESS_ROLES=broker,controller
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:10099,1@kafka-1:10099,2@kafka-2:10099
      - KAFKA_CFG_SUPER_USERS=User:kfadmin
      # LISTENERS
      - KAFKA_CFG_LISTENERS=INTERNAL://:10095,EXTERNAL://:10097,CONTROLLER://:10099
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://:10095,EXTERNAL://<FQDN>:10097
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:SASL_SSL,EXTERNAL:SASL_SSL,CONTROLLER:SASL_SSL
      # BROKER SETTINGS
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
      - KAFKA_INTER_BROKER_USER=kfadmin
      - KAFKA_INTER_BROKER_PASSWORD=000000
      - KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
      # CONTROLLER SETTINGS
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN
      - KAFKA_CONTROLLER_USER=kfadmin
      - KAFKA_CONTROLLER_PASSWORD=000000
      # CLIENT SETTINGS
      - KAFKA_CLIENT_LISTENER_NAME=EXTERNAL
      - KAFKA_CLIENT_USERS=kfadmin,wildfly,other
      - KAFKA_CLIENT_PASSWORDS=000000,111111,222222
      # ACL
      - KAFKA_CFG_AUTHORIZER_CLASS_NAME=org.apache.kafka.metadata.authorizer.StandardAuthorizer
      - KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND=false
      # SSL
      # NOTE(review): KAFKA_TLS_TYPE=JKS implies keystore/truststore files,
      # but no certificate volume is mounted here — confirm where the image
      # is expected to find kafka.keystore.jks / kafka.truststore.jks.
      - KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM=
      - KAFKA_CERTIFICATE_PASSWORD=supersecret
      - KAFKA_TLS_TYPE=JKS
      # SASL
      - KAFKA_CFG_SASL_ENABLED_MECHANISMS=PLAIN
      # COMMON SETTINGS
      - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false
    volumes:
      - kafka_1_data:/bitnami/kafka
    healthcheck:
      # TCP probe of the internal listener; $$ escapes $ for compose.
      test: "bash -c 'printf \"\" > /dev/tcp/127.0.0.1/10095; exit $$?;'"
      interval: 5s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  kafka_1_data:

networks:
  kafka:
# Kafka broker 2 of the 3-node KRaft cluster plus kafka-ui.
services:
  kafka-2:
    container_name: kafka-2
    hostname: kafka-2
    image: docker.io/bitnami/kafka:3.7
    ports:
      - "10095:10095"
      - "10097:10097"
      - "10099:10099"
    restart: always
    networks:
      - kafka
    environment:
      # KAFKA CLUSTER
      - KAFKA_CFG_NODE_ID=2
      - KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
      - KAFKA_CFG_PROCESS_ROLES=broker,controller
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:10099,1@kafka-1:10099,2@kafka-2:10099
      - KAFKA_CFG_SUPER_USERS=User:kfadmin
      # LISTENERS
      - KAFKA_CFG_LISTENERS=INTERNAL://:10095,EXTERNAL://:10097,CONTROLLER://:10099
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://:10095,EXTERNAL://<FQDN>:10097
      # FIX: was PLAINTEXT here while kafka-0 and kafka-1 use SASL_SSL —
      # a mixed-protocol broker cannot join the inter-broker/controller
      # quorum of this cluster. All three nodes must match.
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:SASL_SSL,EXTERNAL:SASL_SSL,CONTROLLER:SASL_SSL
      # BROKER SETTINGS
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
      - KAFKA_INTER_BROKER_USER=kfadmin
      - KAFKA_INTER_BROKER_PASSWORD=000000
      - KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL=PLAIN
      # CONTROLLER SETTINGS
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL=PLAIN
      - KAFKA_CONTROLLER_USER=kfadmin
      - KAFKA_CONTROLLER_PASSWORD=000000
      # CLIENT SETTINGS
      - KAFKA_CLIENT_LISTENER_NAME=EXTERNAL
      - KAFKA_CLIENT_USERS=kfadmin,wildfly,other
      - KAFKA_CLIENT_PASSWORDS=000000,111111,222222
      # ACL
      - KAFKA_CFG_AUTHORIZER_CLASS_NAME=org.apache.kafka.metadata.authorizer.StandardAuthorizer
      - KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND=false
      # SSL
      # NOTE(review): KAFKA_TLS_TYPE=JKS implies keystore/truststore files,
      # but no certificate volume is mounted here — confirm where the image
      # is expected to find kafka.keystore.jks / kafka.truststore.jks.
      - KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM=
      - KAFKA_CERTIFICATE_PASSWORD=supersecret
      - KAFKA_TLS_TYPE=JKS
      # SASL
      - KAFKA_CFG_SASL_ENABLED_MECHANISMS=PLAIN
      # COMMON SETTINGS
      - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false
    volumes:
      - kafka_2_data:/bitnami/kafka
    healthcheck:
      # TCP probe of the internal listener; $$ escapes $ for compose.
      test: "bash -c 'printf \"\" > /dev/tcp/127.0.0.1/10095; exit $$?;'"
      interval: 5s
      timeout: 10s
      retries: 3
      start_period: 30s

  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:v0.7.2
    ports:
      # FIX: quote "host:container" mappings — unquoted colon-separated
      # integers can be mis-typed by YAML parsers.
      - "8099:8080"
    restart: always
    networks:
      - kafka
    environment:
      # CLUSTER SETTINGS
      - KAFKA_CLUSTERS_0_NAME=dev-cluster
      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka-0:10095,kafka-1:10095,kafka-2:10095
      # FIX: was PLAINTEXT, but the cluster's INTERNAL listeners are
      # SASL_SSL and SASL credentials are supplied below.
      # NOTE(review): with SASL_SSL kafka-ui also needs a truststore for the
      # brokers' certificates — confirm how it is provided.
      - KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL
      - KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=PLAIN
      - KAFKA_CLUSTERS_0_PROPERTIES_PROTOCOL=SASL
      - KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=org.apache.kafka.common.security.plain.PlainLoginModule required username="kfadmin" password="000000";
      - DYNAMIC_CONFIG_ENABLED=true
      # AUTH
      - AUTH_TYPE=LOGIN_FORM
      - SPRING_SECURITY_USER_NAME=kfadmin
      - SPRING_SECURITY_USER_PASSWORD=kfadmin
    healthcheck:
      test: wget --no-verbose --tries=1 --spider localhost:8080 || exit 1
      interval: 5s
      timeout: 10s
      retries: 3
      start_period: 30s

volumes:
  kafka_2_data:

networks:
  kafka:
Запустить сервисы почти одновременно из 3х разных окон
В кластере Kafka таким образом создастся три пользователя kfadmin,wildfly,other с паролем 000000,111111,222222 соответственно. По IP адресу третьего сервера и порту 8099 будет доступна консоль просмотра состояния кластера