diff --git a/.github/ci-prerequisites-atlas.sh b/.github/ci-prerequisites-atlas.sh new file mode 100755 index 000000000000..b0a16f22b1e8 --- /dev/null +++ b/.github/ci-prerequisites-atlas.sh @@ -0,0 +1,6 @@ +# Reclaims disk space and sanitizes user home on Atlas infrastructure + +# We use the GitHub cache for the relevant parts of these directories. +# Also, we do not want to keep things like ~/.gradle/build-scan-data. +rm -rf ~/.gradle/ +rm -rf ~/.m2/ diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 18d4d02c5d24..ecaad86410ba 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -14,3 +14,7 @@ updates: - gradle-plugin-portal schedule: interval: "weekly" + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: daily diff --git a/.github/workflows/atlas.yml b/.github/workflows/atlas.yml new file mode 100644 index 000000000000..83a59e0f5eea --- /dev/null +++ b/.github/workflows/atlas.yml @@ -0,0 +1,115 @@ +# The main CI of Hibernate ORM is https://ci.hibernate.org/job/hibernate-orm-pipeline/. +# However, Hibernate ORM builds run on GitHub actions regularly +# to check that it still works and can be used in GitHub forks. +# See https://docs.github.com/en/free-pro-team@latest/actions +# for more information about GitHub actions. + +name: Hibernate ORM build-Atlas + +on: + pull_request: + branches: + - '6.2' + +permissions: {} # none + +# See https://github.com/hibernate/hibernate-orm/pull/4615 for a description of the behavior we're getting. +concurrency: + # Consider that two builds are in the same concurrency group (cannot run concurrently) + # if they use the same workflow and are about the same branch ("ref") or pull request. + group: "workflow = ${{ github.workflow }}, ref = ${{ github.event.ref }}, pr = ${{ github.event.pull_request.id }}" + # Cancel previous builds in the same concurrency group even if they are in process + # for pull requests or pushes to forks (not the upstream repository). 
+ cancel-in-progress: ${{ github.event_name == 'pull_request' || github.repository != 'hibernate/hibernate-orm' }} + +jobs: + build: + permissions: + contents: read + name: Java 11 + # runs-on: ubuntu-latest + runs-on: [self-hosted, Linux, X64, OCI] + strategy: + fail-fast: false + matrix: + include: + - rdbms: oracle_atps + - rdbms: oracle_db19c + - rdbms: oracle_db21c + - rdbms: oracle_db23c + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Reclaim disk space and sanitize user home + run: .github/ci-prerequisites-atlas.sh + - name: Start database + env: + RDBMS: ${{ matrix.rdbms }} + RUNID: ${{ github.run_number }} + run: ci/database-start.sh + - name: Set up Java 11 + uses: graalvm/setup-graalvm@v1 + with: + distribution: 'graalvm' + java-version: '21' + + - name: Generate cache key + id: cache-key + run: | + CURRENT_BRANCH="${{ github.repository != 'hibernate/hibernate-orm' && 'fork' || github.base_ref || github.ref_name }}" + CURRENT_MONTH=$(/bin/date -u "+%Y-%m") + CURRENT_DAY=$(/bin/date -u "+%d") + ROOT_CACHE_KEY="buildtool-cache-atlas" + echo "buildtool-monthly-cache-key=${ROOT_CACHE_KEY}-${CURRENT_MONTH}" >> $GITHUB_OUTPUT + echo "buildtool-monthly-branch-cache-key=${ROOT_CACHE_KEY}-${CURRENT_MONTH}-${CURRENT_BRANCH}" >> $GITHUB_OUTPUT + echo "buildtool-cache-key=${ROOT_CACHE_KEY}-${CURRENT_MONTH}-${CURRENT_BRANCH}-${CURRENT_DAY}" >> $GITHUB_OUTPUT + - name: Cache Maven/Gradle Dependency/Dist Caches + id: cache-maven + uses: actions/cache@v4 + # if it's not a pull request, we restore and save the cache + if: github.event_name != 'pull_request' + with: + path: | + ~/.m2/repository/ + ~/.m2/wrapper/ + ~/.gradle/caches/modules-2 + ~/.gradle/wrapper/ + # A new cache will be stored daily. After that first store of the day, cache save actions will fail because the cache is immutable but it's not a problem. + # The whole cache is dropped monthly to prevent unlimited growth. 
+ # The cache is per branch but in case we don't find a branch for a given branch, we will get a cache from another branch. + key: ${{ steps.cache-key.outputs.buildtool-cache-key }} + restore-keys: | + ${{ steps.cache-key.outputs.buildtool-monthly-branch-cache-key }}- + ${{ steps.cache-key.outputs.buildtool-monthly-cache-key }}- + - name: Restore Maven/Gradle Dependency/Dist Caches + uses: actions/cache/restore@v3 + # if it a pull request, we restore the cache but we don't save it + if: github.event_name == 'pull_request' + with: + path: | + ~/.m2/repository/ + ~/.m2/wrapper/ + ~/.gradle/caches/modules-2 + ~/.gradle/wrapper/ + key: ${{ steps.cache-key.outputs.buildtool-cache-key }} + restore-keys: | + ${{ steps.cache-key.outputs.buildtool-monthly-branch-cache-key }}- + ${{ steps.cache-key.outputs.buildtool-monthly-cache-key }}- + + - name: Run build script + env: + RDBMS: ${{ matrix.rdbms }} + RUNID: ${{ github.run_number }} + run: ./ci/build-github.sh + shell: bash + - name: Upload test reports (if Gradle failed) + uses: actions/upload-artifact@v4 + if: failure() + with: + name: test-reports-java11-${{ matrix.rdbms }} + path: | + ./**/target/reports/tests/ + ./**/target/reports/checkstyle/ + - name: Omit produced artifacts from build cache + run: ./ci/before-cache.sh \ No newline at end of file diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 71424cbd98e2..46a272d7ad5d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -2,13 +2,22 @@ name: "CodeQL" on: push: - branches: [ 'main' ] + branches: [ '6.2' ] pull_request: # The branches below must be a subset of the branches above - branches: [ 'main' ] + branches: [ '6.2' ] schedule: - cron: '34 11 * * 4' +# See https://github.com/hibernate/hibernate-orm/pull/4615 for a description of the behavior we're getting. 
+concurrency: + # Consider that two builds are in the same concurrency group (cannot run concurrently) + # if they use the same workflow and are about the same branch ("ref") or pull request. + group: "workflow = ${{ github.workflow }}, ref = ${{ github.event.ref }}, pr = ${{ github.event.pull_request.id }}" + # Cancel previous builds in the same concurrency group even if they are in process + # for pull requests or pushes to forks (not the upstream repository). + cancel-in-progress: ${{ github.event_name == 'pull_request' || github.repository != 'hibernate/hibernate-orm' }} + jobs: analyze: name: Analyze @@ -27,11 +36,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -45,7 +54,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -58,6 +67,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 with: category: "/language:${{matrix.language}}" \ No newline at end of file diff --git a/.github/workflows/contributor-build.yml b/.github/workflows/contributor-build.yml index acf7fb8a8276..2d06f42c0fd0 100644 --- a/.github/workflows/contributor-build.yml +++ b/.github/workflows/contributor-build.yml @@ -7,12 +7,9 @@ name: Hibernate ORM build on: - push: - branches: - - 'main' pull_request: branches: - - 'main' + - '6.2' permissions: {} # none @@ -51,7 +48,7 @@ jobs: # Running with HANA requires at least 8GB memory just for the database, which we don't have on GH Actions runners # - rdbms: hana steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: persist-credentials: false - name: Reclaim Disk Space @@ -61,31 +58,61 @@ jobs: RDBMS: ${{ matrix.rdbms }} run: ci/database-start.sh - name: Set up Java 11 - uses: actions/setup-java@v2 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' - - name: Get year/month for cache key - id: get-date - run: echo "yearmonth=$(/bin/date -u "+%Y-%m")" >> $GITHUB_OUTPUT - shell: bash - - name: Cache Maven local repository - uses: actions/cache@v2 + + - name: Generate cache key + id: cache-key + run: | + CURRENT_BRANCH="${{ github.repository != 'hibernate/hibernate-orm' && 'fork' || github.base_ref || github.ref_name }}" + CURRENT_MONTH=$(/bin/date -u "+%Y-%m") + CURRENT_DAY=$(/bin/date -u "+%d") + ROOT_CACHE_KEY="buildtool-cache" + echo "buildtool-monthly-cache-key=${ROOT_CACHE_KEY}-${CURRENT_MONTH}" >> $GITHUB_OUTPUT + echo "buildtool-monthly-branch-cache-key=${ROOT_CACHE_KEY}-${CURRENT_MONTH}-${CURRENT_BRANCH}" >> $GITHUB_OUTPUT + echo 
"buildtool-cache-key=${ROOT_CACHE_KEY}-${CURRENT_MONTH}-${CURRENT_BRANCH}-${CURRENT_DAY}" >> $GITHUB_OUTPUT + - name: Cache Maven/Gradle Dependency/Dist Caches id: cache-maven + uses: actions/cache@v4 + # if it's not a pull request, we restore and save the cache + if: github.event_name != 'pull_request' with: path: | - ~/.m2/repository - ~/.gradle/caches/ + ~/.m2/repository/ + ~/.m2/wrapper/ + ~/.gradle/caches/modules-2 ~/.gradle/wrapper/ - # refresh cache every month to avoid unlimited growth - key: maven-localrepo-${{ steps.get-date.outputs.yearmonth }} + # A new cache will be stored daily. After that first store of the day, cache save actions will fail because the cache is immutable but it's not a problem. + # The whole cache is dropped monthly to prevent unlimited growth. + # The cache is per branch but in case we don't find a branch for a given branch, we will get a cache from another branch. + key: ${{ steps.cache-key.outputs.buildtool-cache-key }} + restore-keys: | + ${{ steps.cache-key.outputs.buildtool-monthly-branch-cache-key }}- + ${{ steps.cache-key.outputs.buildtool-monthly-cache-key }}- + - name: Restore Maven/Gradle Dependency/Dist Caches + uses: actions/cache/restore@v3 + # if it a pull request, we restore the cache but we don't save it + if: github.event_name == 'pull_request' + with: + path: | + ~/.m2/repository/ + ~/.m2/wrapper/ + ~/.gradle/caches/modules-2 + ~/.gradle/wrapper/ + key: ${{ steps.cache-key.outputs.buildtool-cache-key }} + restore-keys: | + ${{ steps.cache-key.outputs.buildtool-monthly-branch-cache-key }}- + ${{ steps.cache-key.outputs.buildtool-monthly-cache-key }}- + - name: Run build script env: RDBMS: ${{ matrix.rdbms }} run: ./ci/build-github.sh shell: bash - name: Upload test reports (if Gradle failed) - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 if: failure() with: name: test-reports-java11-${{ matrix.rdbms }} diff --git a/.release/.gitignore b/.release/.gitignore new file mode 100644 index 
000000000000..20f846ffeb97 --- /dev/null +++ b/.release/.gitignore @@ -0,0 +1,2 @@ +# The folder into which we checkout our release scripts into +* \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index d43fefe45bcc..77d4d3116c23 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -26,41 +26,16 @@ this.helper = new JobHelper(this) helper.runWithNotification { stage('Configure') { this.environments = [ -// new BuildEnvironment( dbName: 'h2' ), -// new BuildEnvironment( dbName: 'hsqldb' ), -// new BuildEnvironment( dbName: 'derby' ), -// new BuildEnvironment( dbName: 'mysql' ), -// new BuildEnvironment( dbName: 'mariadb' ), -// new BuildEnvironment( dbName: 'postgresql' ), -// new BuildEnvironment( dbName: 'edb' ), -// new BuildEnvironment( dbName: 'oracle' ), -// new BuildEnvironment( dbName: 'db2' ), -// new BuildEnvironment( dbName: 'mssql' ), -// new BuildEnvironment( dbName: 'sybase' ), -// Don't build with HANA by default, but only do it nightly until we receive a 3rd instance -// new BuildEnvironment( dbName: 'hana_cloud', dbLockableResource: 'hana-cloud', dbLockResourceAsHost: true ), new BuildEnvironment( node: 's390x' ), - new BuildEnvironment( dbName: 'tidb', node: 'tidb', - additionalOptions: '-DdbHost=localhost:4000', - notificationRecipients: 'tidb_hibernate@pingcap.com' ), + new BuildEnvironment( dbName: 'sybase_jconn' ), new BuildEnvironment( testJdkVersion: '17' ), + new BuildEnvironment( testJdkVersion: '21' ), // We want to enable preview features when testing newer builds of OpenJDK: // even if we don't use these features, just enabling them can cause side effects // and it's useful to test that. 
- new BuildEnvironment( testJdkVersion: '19', testJdkLauncherArgs: '--enable-preview' ), - new BuildEnvironment( testJdkVersion: '20', testJdkLauncherArgs: '--enable-preview' ), - new BuildEnvironment( testJdkVersion: '21', testJdkLauncherArgs: '--enable-preview' ) + new BuildEnvironment( testJdkVersion: '22', testJdkLauncherArgs: '--enable-preview' ) ]; - if ( env.CHANGE_ID ) { - if ( pullRequest.labels.contains( 'cockroachdb' ) ) { - this.environments.add( new BuildEnvironment( dbName: 'cockroachdb', node: 'cockroachdb', longRunning: true ) ) - } - if ( pullRequest.labels.contains( 'hana' ) ) { - this.environments.add( new BuildEnvironment( dbName: 'hana_cloud', dbLockableResource: 'hana-cloud', dbLockResourceAsHost: true ) ) - } - } - helper.configure { file 'job-configuration.yaml' // We don't require the following, but the build helper plugin apparently does @@ -84,19 +59,21 @@ stage('Configure') { // Avoid running the pipeline on branch indexing if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { - print "INFO: Build skipped due to trigger being Branch Indexing" - currentBuild.result = 'ABORTED' - return + print "INFO: Build skipped due to trigger being Branch Indexing" + currentBuild.result = 'NOT_BUILT' + return +} +// This is a limited maintenance branch, so don't run this on pushes to the branch, only on PRs +if ( !env.CHANGE_ID ) { + print "INFO: Build skipped because this job should only run for pull request, not for branch pushes" + currentBuild.result = 'NOT_BUILT' + return } stage('Build') { Map executions = [:] Map> state = [:] environments.each { BuildEnvironment buildEnv -> - // Don't build environments for newer JDKs when this is a PR - if ( helper.scmSource.pullRequest && buildEnv.testJdkVersion ) { - return - } state[buildEnv.tag] = [:] executions.put(buildEnv.tag, { runBuildOnNode(buildEnv.node ?: NODE_PATTERN_BASE) { @@ -108,7 +85,7 @@ stage('Build') { // Use withEnv instead of setting env directly, as that is 
global! // See https://github.com/jenkinsci/pipeline-plugin/blob/master/TUTORIAL.md withEnv(["JAVA_HOME=${javaHome}", "PATH+JAVA=${javaHome}/bin"]) { - state[buildEnv.tag]['additionalOptions'] = '' + state[buildEnv.tag]['additionalOptions'] = '-PmavenMirror=nexus-load-balancer-c4cf05fd92f43ef8.elb.us-east-1.amazonaws.com' if ( testJavaHome ) { state[buildEnv.tag]['additionalOptions'] = state[buildEnv.tag]['additionalOptions'] + " -Ptest.jdk.version=${buildEnv.testJdkVersion} -Porg.gradle.java.installations.paths=${javaHome},${testJavaHome}" @@ -124,131 +101,18 @@ stage('Build') { try { stage('Start database') { switch (buildEnv.dbName) { - case "h2_1_4": - state[buildEnv.tag]['additionalOptions'] = state[buildEnv.tag]['additionalOptions'] + - " -Pgradle.libs.versions.h2=1.4.197 -Pgradle.libs.versions.h2gis=1.5.0" - break; - case "hsqldb_2_6": - state[buildEnv.tag]['additionalOptions'] = state[buildEnv.tag]['additionalOptions'] + - " -Pgradle.libs.versions.hsqldb=2.6.1" - break; - case "derby_10_14": - state[buildEnv.tag]['additionalOptions'] = state[buildEnv.tag]['additionalOptions'] + - " -Pgradle.libs.versions.derby=10.14.2.0" - break; - case "mysql": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('mysql:8.0.31').pull() - } - sh "./docker_db.sh mysql" - state[buildEnv.tag]['containerName'] = "mysql" - break; - case "mysql_5_7": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('mysql:5.7.40').pull() - } - sh "./docker_db.sh mysql_5_7" - state[buildEnv.tag]['containerName'] = "mysql" - break; - case "mariadb": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('mariadb:10.9.3').pull() - } - sh "./docker_db.sh mariadb" - state[buildEnv.tag]['containerName'] = "mariadb" - break; - case "mariadb_10_3": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - 
docker.image('mariadb:10.3.36').pull() - } - sh "./docker_db.sh mariadb_10_3" - state[buildEnv.tag]['containerName'] = "mariadb" - break; - case "postgresql": - // use the postgis image to enable the PGSQL GIS (spatial) extension - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('postgis/postgis:15-3.3').pull() - } - sh "./docker_db.sh postgresql" - state[buildEnv.tag]['containerName'] = "postgres" - break; - case "postgresql_10": - // use the postgis image to enable the PGSQL GIS (spatial) extension - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('postgis/postgis:10-2.5').pull() - } - sh "./docker_db.sh postgresql_10" - state[buildEnv.tag]['containerName'] = "postgres" - break; case "edb": - docker.image('quay.io/enterprisedb/edb-postgres-advanced:14.5-3.2-postgis').pull() + docker.image('quay.io/enterprisedb/edb-postgres-advanced:15.4-3.3-postgis').pull() sh "./docker_db.sh edb" state[buildEnv.tag]['containerName'] = "edb" break; - case "edb_10": - docker.image('quay.io/enterprisedb/edb-postgres-advanced:10.22').pull() - sh "./docker_db.sh edb_10" - state[buildEnv.tag]['containerName'] = "edb" - break; - case "oracle": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('gvenzl/oracle-xe:21.3.0-full').pull() - } - sh "./docker_db.sh oracle" - state[buildEnv.tag]['containerName'] = "oracle" - break; - case "oracle_11_2": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('gvenzl/oracle-xe:11.2.0.2-full').pull() - } - sh "./docker_db.sh oracle_11" - state[buildEnv.tag]['containerName'] = "oracle" - break; - case "db2": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('ibmcom/db2:11.5.7.0').pull() - } - sh "./docker_db.sh db2" - state[buildEnv.tag]['containerName'] = "db2" - break; - case "db2_10_5": - 
docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('ibmoms/db2express-c@sha256:a499afd9709a1f69fb41703e88def9869955234c3525547e2efc3418d1f4ca2b').pull() - } - sh "./docker_db.sh db2_10_5" - state[buildEnv.tag]['containerName'] = "db2" - break; - case "mssql": - docker.image('mcr.microsoft.com/mssql/server@sha256:f54a84b8a802afdfa91a954e8ddfcec9973447ce8efec519adf593b54d49bedf').pull() - sh "./docker_db.sh mssql" - state[buildEnv.tag]['containerName'] = "mssql" - break; - case "mssql_2017": - docker.image('mcr.microsoft.com/mssql/server@sha256:7d194c54e34cb63bca083542369485c8f4141596805611e84d8c8bab2339eede').pull() - sh "./docker_db.sh mssql_2017" - state[buildEnv.tag]['containerName'] = "mssql" - break; - case "sybase": + case "sybase_jconn": docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { docker.image('nguoianphu/docker-sybase').pull() } sh "./docker_db.sh sybase" state[buildEnv.tag]['containerName'] = "sybase" break; - case "cockroachdb": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('cockroachdb/cockroach:v22.2.2').pull() - } - sh "./docker_db.sh cockroachdb" - state[buildEnv.tag]['containerName'] = "cockroach" - break; - case "cockroachdb_21_2": - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('cockroachdb/cockroach:v21.2.16').pull() - } - sh "./docker_db.sh cockroachdb_21_2" - state[buildEnv.tag]['containerName'] = "cockroach" - break; } } stage('Test') { @@ -256,8 +120,11 @@ stage('Build') { withEnv(["RDBMS=${buildEnv.dbName}"]) { try { if (buildEnv.dbLockableResource == null) { - timeout( [time: buildEnv.longRunning ? 480 : 120, unit: 'MINUTES'] ) { - sh cmd + withCredentials([file(credentialsId: 'sybase-jconnect-driver', variable: 'jconnect_driver')]) { + sh 'cp -f $jconnect_driver ./drivers/jconn4.jar' + timeout( [time: buildEnv.longRunning ? 
480 : 120, unit: 'MINUTES'] ) { + sh cmd + } } } else { @@ -290,6 +157,9 @@ stage('Build') { } }) } + executions.put('Hibernate Search Update Dependency', { + build job: '/hibernate-search-dependency-update/6.2', propagate: true, parameters: [string(name: 'UPDATE_JOB', value: 'orm6.2'), string(name: 'ORM_REPOSITORY', value: helper.scmSource.remoteUrl), string(name: 'ORM_PULL_REQUEST_ID', value: helper.scmSource.pullRequest.id)] + }) parallel(executions) } @@ -310,6 +180,7 @@ class BuildEnvironment { String toString() { getTag() } String getTag() { "${node ? node + "_" : ''}${testJdkVersion ? 'jdk_' + testJdkVersion + '_' : '' }${dbName}" } + String getRdbms() { dbName.contains("_") ? dbName.substring(0, dbName.indexOf('_')) : dbName } } void runBuildOnNode(String label, Closure body) { diff --git a/README.adoc b/README.adoc index 17585d6a6e81..54a71ad43388 100644 --- a/README.adoc +++ b/README.adoc @@ -1,12 +1,11 @@ -Hibernate ORM is a library providing Object/Relational Mapping (ORM) support -to applications, libraries, and frameworks. +Hibernate ORM is a powerful object/relational mapping solution for Java, and makes it easy to develop persistence logic for applications, libraries, and frameworks. -It also provides an implementation of the JPA specification, which is the standard Java specification for ORM. +Hibernate implements JPA, the standard API for object/relational persistence in Java, but also offers an extensive set of features and APIs which go beyond the specification. -This is the repository of its source code; see https://hibernate.org/orm/[Hibernate.org] for additional information. +See https://hibernate.org/orm/[Hibernate.org] for more information. 
-image:https://ci.hibernate.org/job/hibernate-orm-pipeline/job/main/badge/icon[Build Status,link=https://ci.hibernate.org/job/hibernate-orm-pipeline/job/main/] -image:https://img.shields.io/badge/Revved%20up%20by-Gradle%20Enterprise-06A0CE?logo=Gradle&labelColor=02303A[link=https://ge.hibernate.org/scans] +image:https://ci.hibernate.org/job/hibernate-orm-pipeline/job/6.2/badge/icon[Build Status,link=https://ci.hibernate.org/job/hibernate-orm-pipeline/job/6.2/] +image:https://img.shields.io/badge/Revved%20up%20by-Gradle%20Enterprise-06A0CE?logo=Gradle&labelColor=02303A[link=https://develocity.commonhaus.dev/scans] == Continuous Integration @@ -18,7 +17,7 @@ for its CI needs. See == Building from sources -The build requires at least Java 11 JDK. +The build requires at least Java 11 and at most Java 17. Hibernate uses https://gradle.org[Gradle] as its build tool. See the _Gradle Primer_ section below if you are new to Gradle. @@ -178,7 +177,7 @@ The following table illustrates a list of commands for various databases that ca |Oracle XE |`./docker_db.sh oracle` -|`./gradlew test -Pdb=oracle_ci` +|`./gradlew test -Pdb=oracle_free_ci` |DB2 |`./docker_db.sh db2` @@ -188,10 +187,14 @@ The following table illustrates a list of commands for various databases that ca |`./docker_db.sh mssql` |`./gradlew test -Pdb=mssql_ci` -|Sybase ASE +|Sybase ASE (jTDS) |`./docker_db.sh sybase` |`./gradlew test -Pdb=sybase_ci` +|Sybase ASE (jConnect) +|`./docker_db.sh sybase` +|`./gradlew test -Pdb=sybase_jconn_ci` + |SAP HANA |`./docker_db.sh hana` |`./gradlew test -Pdb=hana_ci` @@ -199,4 +202,24 @@ The following table illustrates a list of commands for various databases that ca |CockroachDB |`./docker_db.sh cockroachdb` |`./gradlew test -Pdb=cockroachdb` + +|TiDB +|`./docker_db.sh tidb` +|`./gradlew test -Pdb=tidb` |=== + +To stop a container started by `docker`, use the command + +[source] +---- +docker stop $container_name +---- + +NOTE:: Substitute `podman` command for `docker` if using 
`podman` + +E.g., to stop the mariadb container + +[source] +---- +docker stop mariadb +---- diff --git a/build.gradle b/build.gradle index 15fb87227b1a..6f2d0d805d98 100644 --- a/build.gradle +++ b/build.gradle @@ -12,7 +12,7 @@ buildscript { dependencies { classpath 'org.hibernate.build.gradle:version-injection-plugin:1.0.0' - classpath 'org.asciidoctor:asciidoctor-gradle-plugin:1.5.7' +// classpath 'org.asciidoctor:asciidoctor-gradle-plugin:1.5.7' classpath 'de.thetaphi:forbiddenapis:3.2' classpath 'org.junit.platform:junit-platform-gradle-plugin:1.0.1' } @@ -25,11 +25,13 @@ plugins { id 'org.hibernate.orm.database-service' apply false id 'biz.aQute.bnd' version '6.3.1' apply false - id 'io.github.gradle-nexus.publish-plugin' version '1.1.0' + id 'org.checkerframework' version '0.6.34' + id 'org.hibernate.orm.build.jdks' id 'idea' id 'org.jetbrains.gradle.plugin.idea-ext' version '1.0' id 'eclipse' + id "com.dorongold.task-tree" version "2.1.1" } apply from: file( 'gradle/module.gradle' ) @@ -38,79 +40,29 @@ apply from: file( 'gradle/module.gradle' ) // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Release Task -task release { +tasks.register('release') { description = "The task performed when we are performing a release build. Relies on " + "the fact that subprojects will appropriately define a release task " + "themselves if they have any release-related activities to perform" doFirst { - def javaVersionsInUse = [gradle.ext.javaVersions.main.compiler, gradle.ext.javaVersions.main.release, - gradle.ext.javaVersions.test.compiler, gradle.ext.javaVersions.test.release, - gradle.ext.javaVersions.test.launcher].toSet() - if ( javaVersionsInUse != [JavaLanguageVersion.of( 11 )].toSet() ) { - throw new IllegalStateException( "Please use JDK 11 to perform the release. 
Currently using: ${javaVersionsInUse}" ) + def javaVersionsInUse = jdkVersions.allVersions + if (javaVersionsInUse != [JavaLanguageVersion.of(11)].toSet()) { + throw new IllegalStateException("Please use JDK 11 to perform the release. Currently using: ${javaVersionsInUse}") } } } -task publish { +tasks.register('publish') { description = "The task performed when we want to just publish maven artifacts. Relies on " + "the fact that subprojects will appropriately define a release task " + "themselves if they have any publish-related activities to perform" } -ext { - if ( project.hasProperty( 'hibernatePublishUsername' ) ) { - if ( ! project.hasProperty( 'hibernatePublishPassword' ) ) { - throw new GradleException( "Should specify both `hibernatePublishUsername` and `hibernatePublishPassword` as project properties" ); - } - } -} - -nexusPublishing { - repositories { - sonatype { - username = project.hasProperty( 'hibernatePublishUsername' ) ? project.property( 'hibernatePublishUsername' ) : null - password = project.hasProperty( 'hibernatePublishPassword' ) ? 
project.property( 'hibernatePublishPassword' ) : null - } - } -} - -gradle.taskGraph.addTaskExecutionGraphListener( - new TaskExecutionGraphListener() { - @Override - void graphPopulated(TaskExecutionGraph graph) { - String[] tasksToLookFor = [ - 'publish', - 'publishToSonatype', - 'publishAllPublicationsToSonatype', - 'publishPublishedArtifactsPublicationToSonatypeRepository', - 'publishRelocationArtifactsPublicationToSonatypeRepository', - ] - - for ( String taskToLookFor : tasksToLookFor ) { - if ( graph.hasTask( taskToLookFor ) ) { - // trying to publish - make sure the needed credentials are available - - if ( project.property( 'hibernatePublishUsername' ) == null ) { - throw new RuntimeException( "`-PhibernatePublishUsername=...` not found" ) - } - if ( project.property( 'hibernatePublishPassword' ) == null ) { - throw new RuntimeException( "`-PhibernatePublishPassword=...` not found" ) - } - - break; - } - } - } - } -) - - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // CI Build Task -task ciBuild { +tasks.register('ciBuild') { description = "The task performed when one of the 'main' jobs are triggered on the " + "CI server. Just as above, relies on the fact that subprojects will " + "appropriately define a release task themselves if they have any tasks " + @@ -137,6 +89,3 @@ idea { name = "hibernate-orm" } } - - - diff --git a/changelog.txt b/changelog.txt index ddfc8bdb5d7e..e35554666555 100644 --- a/changelog.txt +++ b/changelog.txt @@ -3,6 +3,1060 @@ Hibernate 6 Changelog Note: Please refer to JIRA to learn more about each issue. 
+Changes in 6.2.43.Final (August 24, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/34858 + +** Bug + * [HHH-19719] - org.hibernate.query.sqm.function.SelfRenderingSqmWindowFunction#appendHqlString throws IndexOutOfBoundsException when has no arguments + * [HHH-19712] - Column deduplication leads to wrong alias calculation for native query alias expansion + * [HHH-19687] - Criteria query with lazy @OneToOne and @EmbeddedId throws exception + + +Changes in 6.2.42.Final (August 10, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/34396 + +** Bug + * [HHH-19698] - In list predicate fails to parse with null literal + * [HHH-19453] - sequence support not working on db2 As400 7.3 + * [HHH-19261] - OracleDialect getQueryHintString incorrectly joins supplied hints + * [HHH-17897] - Joining Multiple CTEs in a HQL + * [HHH-17522] - Support correlation of CTEs + + +Changes in 6.2.41.Final (July 20, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/34198 + +** Bug + * [HHH-19621] - SUBSTRING function for DB2i Series is broken + * [HHH-19550] - Attribute join on correlated from node receives wrong root + * [HHH-19497] - Fallback implementation of IN LIST is incorrect with dangerous consequences + + +Changes in 6.2.40.Final (July 06, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/34099 + +** Bug + * [HHH-19464] - Storing a binary data into BLOB on Oracle cutting off its content. 
+ * [HHH-18898] - Specific mistake in HQL gives NullPointerException in AbstractSqlAstTranslator + + +Changes in 6.2.39.Final (June 29, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/34066 + +** Bug + * [HHH-19577] - BytecodeProviderImpl.SetPropertyValues wrongly emits duplicate stack map frames + * [HHH-19573] - Presence of wrapper byte array pollutes BasicTypeRegistry + * [HHH-19571] - CloningPropertyCall causes non-deterministic bytecode for AccessOptimizer + * [HHH-19560] - TupleTransformer and ResultListTransformer trash the query interpretation cache + + +Changes in 6.2.38.Final (May 26, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/33637 + +** Bug + * [HHH-19477] - ConnectionReleaseMode.AFTER_STATEMENT ineffective due to missing connection release + + +Changes in 6.2.37.Final (May 18, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32944 + +** Bug + * [HHH-17629] - Criteria and Entity graph generates same join clause twice + + +Changes in 6.2.36.Final (March 16, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32878 + +** Bug + * [HHH-19246] - Fetch join makes partially covered EntityGraph ineffective + + +Changes in 6.2.35.Final (March 09, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32713 + +** Bug + * [HHH-19206] - Bytecode-enhanced dirty checking ineffective if entity's 
embedded ID set manually (to same value) + * [HHH-18229] - "UnsupportedOperationException: Re-work support for semi-resolve " with null value in column referenced by @JoinColumn + * [HHH-17420] - JoinColumn throws an `occurs out of order` AnnotationException + * [HHH-17151] - NPE when binding null parameter in native query with explicit TemporalType + + +Changes in 6.2.34.Final (February 26, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32680 + +** Bug + * [HHH-18027] - Delete/Update statement with IN clause fails on second invocation + + +Changes in 6.2.33.Final (February 24, 2025) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32355 + +** Bug + * [HHH-19126] - Plural valued paths should be collection-typed instead of element typed + * [HHH-18912] - ORM release process + + +Changes in 6.2.32.Final (October 02, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32344 + +** Bug + * [HHH-16772] - Generated bytecode for HibernateAccessOptimizer class is invalid and causes operand stack overflow issue. 
+ +** Improvement + * [HHH-18507] - allow overwriting the default db image with environment variable in docker_db.sh + + +Changes in 6.2.31.Final (August 21, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32331 + +** Bug + * [HHH-18506] - Flush performance degradation due to itable stubs + + +Changes in 6.2.30.Final (August 06, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32322 + +** Bug + * [HHH-18410] - Performance regression due to megamorphic calls + + +Changes in 6.2.28.Final (July 03, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32321 + +** Bug + * [HHH-18280] - Support named procedure parameters down to the JDBC level + + +Changes in 6.2.27.Final (July 02, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32314 + +** Bug + * [HHH-17344] - DB2zDialect NullPointerException + + +Changes in 6.2.26.Final (June 11, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32287 + +** Bug + * [HHH-18170] - Subquery randomly generating wrong SQL due to duplicate alias + * [HHH-16461] - @Version + session.refresh(entity, LockMode.PESSIMISTIC_WRITE) leads to StaleObjectStateException + +** Proposal + * [HHH-18141] - Skip optimizeUnloadedDelete(DeleteEvent) on POST_COMMIT_DELETE + + +Changes in 6.2.25.Final (April 25, 2024) 
+------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32273 + +** Bug + * [HHH-17964] - Hibernate using wrong column order for ElementCollection query when composite ID is present + * [HHH-17885] - Same named formula attribute of different Embedded uses same selection expression + * [HHH-17883] - Wrong order of primary key join columns with @EmbeddedId and joined inheritance + * [HHH-16994] - Native query with select * and join tables with same named columns throws NonUniqueDiscoveredSqlAliasException + + +Changes in 6.2.24.Final (March 29, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32272 + +** Bug + * [HHH-17734] - Hibernate should prioritize provider_class over datasource + * [HHH-17493] - Negating a SqmNegatedPredicate has no effect on Hql string + + +Changes in 6.2.23.Final (March 27, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32250 + +** Bug + * [HHH-17705] - NullPointerException during enhancement when using the default BytecodeProvider in Wildfly + * [HHH-17689] - Cache SQL statement for unique key lookup + +** Improvement + * [HHH-17319] - Use Oracle GraalVM for Atlas builds + + +Changes in 6.2.22.Final (January 24, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32246 + +** Bug + * [HHH-17670] - NPE in FromClause#findTableGroup + * [HHH-17634] - Merging a new entity having a @GeneratedValue id should not set the generated id to the original entity + + +Changes in 6.2.21.Final (January 22, 2024) 
+------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32241 + +** Bug + * [HHH-17643] - Allow uninitialized proxy serialization even when a SessionFactory is not available + + +Changes in 6.2.20.Final (January 16, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32239 + +** Bug + * [HHH-17643] - Allow uninitialized proxy serialization even when a SessionFactory is not available + +** Task + * [HHH-17536] - Update MySQL JDBC driver to 8.2.0 + + +Changes in 6.2.19.Final (January 10, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32236 + +** Bug + * [HHH-17623] - Ordering collection @OrderBy based on association fails + * [HHH-17106] - Varchar(1) column for Java Enum fails with ClassCastException + + +Changes in 6.2.18.Final (January 05, 2024) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32230 + +** Bug + * [HHH-17606] - Cannot resolve path of nested generic mapped-superclass joins + * [HHH-17380] - Persisting an entity with a non generated id and @MapsId throws PropertyValueException + * [HHH-16935] - Hibernate cdi extension Regression introduced by HHH-16096 + * [HHH-16881] - Envers RevisionListener is not created when Hibernate CDI Extensions are enabled + + +Changes in 6.2.17.Final (December 19, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32229 + +** Bug + * [HHH-17405] - Cannot resolve path of generic 
mapped-superclass association path + + +Changes in 6.2.16.Final (December 15, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32225 + +** Bug + * [HHH-16593] - mappedBy non-association + + +Changes in 6.2.15.Final (December 05, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32222 + +** Bug + * [HHH-17491] - UnknownEntityTypeException thrown when multiple subclasses define an attribute with the same name and one is a MappedSuperclass + + +Changes in 6.2.14.Final (December 01, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32207 + +** Bug + * [HHH-17445] - Subquery correlated path expressions do not work with nullness predicates + * [HHH-17384] - OneToMany association with @NotFound results in SQL with different JOIN-type for SELECT (LEFT JOIN) and COUNT (JOIN) + * [HHH-17351] - Missing basic type registration after changing preferred jdbc type + * [HHH-17329] - Query Cache contains null values when entity was loaded as proxy + * [HHH-17294] - Non-Embeddable JSON objects are not marked as dirty when modified + * [HHH-17143] - More not-found fix ups + * [HHH-17108] - Error In Native Query when adding two Entity's and using composite key. Error: Unable to find column position by name. 
+ * [HHH-17102] - @SqlResultSetMapping doesn’t work with @Inheritance(strategy = InheritanceType.JOINED) + +** Improvement + * [HHH-17315] - Reduce Oracle database user privileges to avoid cross schemas clashes + +** Task + * [HHH-17367] - Add links to tutorials in documentation + * [HHH-17323] - Add annotation processor for tests explicitly to fix JDK22 issues + + +Changes in 6.2.13.Final (October 11, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32203 + +** Bug + * [HHH-16933] - JSON issue on Oracle 23c + * [HHH-17225] - Right join does not return records from the right table if there's no matching record in the from clause + * [HHH-17264] - OracleAggregateSupport should use json_object to create empty json + * [HHH-17276] - Columns that are widened to LOB type aren't considered as LOBs + * [HHH-17290] - Embeddable with a primitive field cannot be set to null + * [HHH-17302] - Use nocache for MariaDB sequences + +** Improvement + * [HHH-16876] - Update Oracle testing to 23c + +** Task + * [HHH-17237] - Integrate Oracle Atlas for CI testing + * [HHH-17296] - Gradle 8.4 upgrade + * [HHH-17297] - Adapt limited support testing for 6.2 + + +Changes in 6.2.12.Final (October 06, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32201 + +** Bug + * [HHH-16897] - Delete query for entity using table-per-class inheritance may result in NullPointerException + +** Task + * [HHH-17239] - Automate maintenance releases + + +Changes in 6.2.11.Final (October 04, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32194 + +** Bug + * [HHH-17257] - ElementCollection is missing 
elements after update if embeddable type has primitive columns marked as nullable
+ * [HHH-17256] - elementcollection embeddable contains only nulls when loading in multiple steps
+ * [HHH-17255] - Cannot resolve table reference for treated many-to-many map collection simple attribute
+ * [HHH-17243] - org.hibernate.MappingException: Could not resolve named type : duration
+ * [HHH-17242] - Oracle temporal arithmetic performance regression
+ * [HHH-17234] - PostgreSQL: operator does not exist '!=-' exception when using JPA not equals operator '<>' with negative number
+ * [HHH-17231] - SQLGrammarException when selecting property of grouped join association in subquery
+ * [HHH-17228] - Exception when selecting from a subclass in single table inheritance when another entity has 1-1 attributes of other subclasses
+ * [HHH-17205] - QueryException: could not resolve property: null on OneToOne with composite key
+ * [HHH-17188] - Query Cache contains null values when entity is found in Persistence Context
+ * [HHH-17178] - Is null predicate on treated plural path causes InterpretationException
+ * [HHH-17172] - SerializableProxy: Unable to retrieve matching session factory by name (Hibernate 6 regression?) 
+ * [HHH-17170] - Custom @SQLDelete, @SQLDeleteAll and @SQLUpdate not working on associated collections + * [HHH-17155] - DynamicInstantiationResult wrong java type constructor selected + * [HHH-17131] - Regression in entity streams with associated collections resulting in result duplication + * [HHH-17105] - SQL clause from @WhereJoinTable is no longer used for DELETE queries (6.2 regression) + * [HHH-17104] - Bug with max() request inside projection + * [HHH-17100] - CustomType wrongly calls UserType#disassemble + * [HHH-17049] - Bytecode Enhancement, extra records created for associations created in constructor + * [HHH-16962] - General documentation improvements + * [HHH-16945] - CTE query cycle attribute evaluated incorrectly on MSSQL using collation "Latin1_General_CI_AS" + * [HHH-16900] - Iteration of MutationOperation(s) triggers type pollution + * [HHH-16885] - Hibernate 6.1.6 - 6.2.7 changes outer to inner join when EntityGraph and @Id + * [HHH-16781] - @JavaType annotation is ignored when Basic type is already registered for same JdbcType/Java class pair + * [HHH-15317] - When setting a read-only entity to non-readonly, mutable properties that are user defined types are not cloned + * [HHH-13741] - LOG_SLOW_QUERY parameter doesn't always log SQL + +** Improvement + * [HHH-17286] - Upgrade integration tests to use Oracle JDBC driver version 23.3.0.23.09 + * [HHH-17282] - Introduce a specialized Map for NavigablePath to Initializer + * [HHH-17220] - Avoid runtime lookups of JdbcService from TableGenerator and TableStructure + * [HHH-17082] - Improve documentation of configuration settings + * [HHH-16706] - Spelling fix in Envers.adoc + * [HHH-16403] - Avoid unnecessary wrapping for exceptions/errors thrown by getters/setters + +** Task + * [HHH-17187] - Avoid 0 byte trailing UUID's in tests + * [HHH-17160] - Gradle 8.3 upgrade + * [HHH-17087] - Update container images to the latest version + * [HHH-17047] - Follow up tasks for Gradle 8.2 upgrade + * 
[HHH-17015] - Upgrade to Gradle 8
+ * [HHH-16675] - Reenable native query OffsetDateTime test after Oracle JDBC update
+
+
+Changes in 6.2.9.Final (September 19, 2023)
+------------------------------------------------------------------------------------------------------------------------
+
+https://hibernate.atlassian.net/projects/HHH/versions/32192
+
+** Bug
+ * [HHH-17221] - AssertionError initializing a collection with FetchMode.SUBSELECT and IdClass having only one field
+ * [HHH-17202] - ArrayStoreException for single field id class entity collection batch loading
+ * [HHH-17201] - Unexpected value type exception for unordered multi id Load with ordered return disable
+ * [HHH-17173] - Getting one-to-one association through a reference to a bytecode enhanced entity fails
+ * [HHH-17159] - java.lang.StackOverflowError during Update on Entity with Embeddable and JSON
+ * [HHH-17156] - NPE when an Embeddable column is reused in another class related by inheritance
+ * [HHH-17080] - [Envers] AuditReader.getRevisionNumberForDate(LocalDateTime) uses Epoch Seconds instead of Epoch Millis
+ * [HHH-17079] - NPE when using CompositeUserType with generic fields in Hibernate 6
+
+
+Changes in 6.2.8.Final (August 31, 2023)
+------------------------------------------------------------------------------------------------------------------------
+
+https://hibernate.atlassian.net/projects/HHH/versions/32178
+
+** Bug
+ * [HHH-17130] - Refactor restrictive code in SubselectFetch checking for instanceof EntityResultInitializer
+ * [HHH-17116] - AssertionError when using lateral join to subquery if criteria tree copy is enabled
+ * [HHH-17085] - SQLGrammarException when selecting property of grouped join association in HyperSQL and PostgreSQL
+ * [HHH-17081] - Loading an entity which uses joined inheritance fails if two or more implementations have the same association property name
+ * [HHH-17078] - Memory leak in ScrollableResultsConsumer
+ * [HHH-17075] - Lazy loading of 
association with non-PK referencedColumnName always yields null when bytecode enhancement is enabled + * [HHH-17074] - Type inference in duration arithmetic is wrong + * [HHH-17071] - Basic Collection of 'Converted' Embeddables stopped working with 6.2 + * [HHH-17064] - Fetch mode select causes StackOverflowError if used together with fetch type lazy + * [HHH-17045] - Unable to locate parameter for RESTRICT - UPDATE error when updating entity with RowId + * [HHH-17041] - Embeddable and Generics throws IllegalArgumentException + * [HHH-17035] - Bytecode enhancement leads to StackOverflowError with specific setup involving different generic parameter names + * [HHH-17034] - Bytecode enhancement leads to broken constructor for a generic embedded field in a twice removed MappedSuperclass + * [HHH-17033] - Invalid SQL generated when implicit joins are used + * [HHH-17019] - EntityListener is not triggered for a LAZY loaded association when using bytecode enhancement + * [HHH-17016] - Using a legacy MySQL dialect leads to an error + * [HHH-17011] - The ddl is applied every time the application starts, even if the table structure has not changed. 
+ * [HHH-17004] - Nullpointer when using setFirstResult and setMaxResults in different combinations
+ * [HHH-17001] - An "on"-clause referencing the affected join node causes a StackOverflowException
+ * [HHH-17000] - Do not keep static references to log levels
+ * [HHH-16997] - Embedded components in HibernateProxy are not initialized when entity has reference to another entity of the same type
+ * [HHH-16988] - AssertionError in AbstractSqmPath.copyTo when using subquery correlated to root with more than 1 level in the class hierarchy
+ * [HHH-16979] - SQLGrammarException for JOINED entity with additional join in query
+ * [HHH-16970] - Orphan removal not working in @Embeddable for FetchType.EAGER collections
+ * [HHH-16968] - StackOverflowError when using NaturalIdLoadAccess and bi-directional association
+ * [HHH-16966] - StackOverFlowError with @ManyToOne and @Proxy( lazy=false )
+ * [HHH-16959] - Fail to batch delete entities with nested embeddeds
+ * [HHH-16952] - Bytecode enhancement now requires embedded component to be @Embeddable
+ * [HHH-16939] - Optimistic and Pessimistic Force Increment Update Statements are not committed when using a batch
+ * [HHH-16938] - Meta annotation @AnyDiscriminatorValue does not work
+ * [HHH-16937] - Persisting entity with nullable @Any field throws “not-null property references a null or transient value”
+ * [HHH-16928] - Subquery in 'where' clause causes additional 'join' creation in SQM
+ * [HHH-16919] - Bidirectional mapping with @Any causes infinite loop on fetch
+ * [HHH-16918] - Can't use the primary id column as the joincolumn for @Any association
+ * [HHH-16911] - MapBackedClassValue ClassLoader leak
+ * [HHH-16908] - Fail to compute column mapping on entity with idClass having one field as pk of a OneToOne association
+ * [HHH-16901] - Embedded field in entity association from composite key not correctly instantiated
+ * [HHH-16888] - Left join with exists subquery adds extra join
+ * [HHH-16879] - Inconsistent 
@Parent behavior if cache is enabled + * [HHH-16878] - Invalid SQL generated for mutation when using association with @NotFound + * [HHH-16825] - Cascading an entity with a composite key causes NullPointerException in AbstractClassJavaType.extractHashCode + * [HHH-16816] - Hibernate 6.2 could not execute certain delete statements + * [HHH-16798] - ClassCastException in Hibernate 6 when "join fetch" is used in a query with entity inheritance + * [HHH-16766] - Unable to fully load entity with IdClass lazily or with max fetch depth + * [HHH-16762] - SchemaMigrator generates ORA-22859 errors on columns annotated with @Lob + * [HHH-16759] - Merge fails when entity has an Embedded Java record + * [HHH-16755] - NPE while retrieving static metamodel attribute of generic embeddable extending a superclass + * [HHH-16670] - ENUM column definitions cannot be customized (on MySQL/MariaDB) + * [HHH-16611] - Sybase: ERROR org.hibernate.engine.jdbc.spi.SqlExceptionHelper - db.tablename not found + * [HHH-16591] - @org.hibernate.annotations.JdbcTypeCode does not override the `hibernate.type.preferred_duration_jdbc_type` type + + +Changes in 6.2.7.Final (July 20, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32170 + +** Bug + * [HHH-16943] - Column ordering leads to wrong column order in unique constraints + * [HHH-16926] - AssertionError when sorting by fields of joined entity + * [HHH-16923] - Deleting all entities of a given type fails when using a composite primary key and order_updates + * [HHH-16912] - Hibernate 6.2 breaks backward compatibility + * [HHH-16905] - Since 6.2.6 criteria query with nested fetches do not fetch some entities any more + * [HHH-16892] - LocalXmlResourceResolver does not resolve dtd URLs that use https scheme + * [HHH-16890] - StackOverflowError when loading entities with @Proxy(lazy = false) + * [HHH-16886] - 
Incorrect SQL alternative for tuple-in-lists on DBMS not supporting tuple-in-lists
+ * [HHH-16872] - JDBC fetch size seems to be ignored post migration to hibernate6
+ * [HHH-16871] - Queries returning null values for related entities depending on 'max_fetch_depth'
+ * [HHH-16845] - Cached entity with @EmbeddedId cannot use @ElementCollection
+ * [HHH-16837] - UnknownTableReferenceException with @ManyToMany and @OrderBy and InheritanceType.JOINED
+ * [HHH-16833] - Assertion Error when inserting two entities linked with a OneToOne relation
+ * [HHH-16812] - StackOverflowError when an embeddable's @Parent is a subclass in an inheritance tree
+ * [HHH-16811] - Dirty property lost and not detected with Batch Fetch, Embedded and FetchMode SELECT
+ * [HHH-16810] - Fail to delete entity with a composite id using an @IdClass with one of its fields mapped from the id of a @ManyToOne association
+ * [HHH-16803] - Setting value of enum type to null using nullLiteral: "Not a managed type"
+ * [HHH-16792] - Incorrect generated SQL query when using CriteriaUpdate on an entity with inheritance
+ * [HHH-16765] - Cannot parse quoted table name
+ * [HHH-16754] - unionAll with subquery orderby using path expression throws an error
+ * [HHH-16709] - @EmbeddedId Foreign key comparator throws NPE with order_updates enabled
+ * [HHH-16667] - Hibernate 6 PrimaryKeyJoinColumn causes implicit version update to fail
+ * [HHH-16586] - When merging a persisted entity with a null Version, Hibernate treats entity as transient instead of throwing an Exception
+ * [HHH-15917] - Unrelated Entity Join throws IllegalStateException
+ * [HHH-15720] - Using multiple select in a multiselect generates java.lang.ArrayIndexOutOfBoundsException
+
+** Task
+ * [HHH-16877] - Improved efficiency of lookup for MutationExecutorService
+
+
+Changes in 6.2.6.Final (June 30, 2023)
+------------------------------------------------------------------------------------------------------------------------
+
+https://hibernate.atlassian.net/projects/HHH/versions/32164
+
+** Bug
+ * [HHH-16868] - IN clause with more than 1000 expressions
+ * [HHH-16839] - Failing to update a one-to-one lazy association with `OptimisticLockType.DIRTY` and enabled bytecode enhancement
+ * [HHH-16832] - Bytecode enhancement leads to broken constructor for a generic embedded field in a MappedSuperclass
+ * [HHH-16831] - Revisit logging statements that use string concat for formatted logging
+ * [HHH-16821] - Fail to delete entity that contains an embedded with a many to many association as field
+ * [HHH-16820] - When batching enabled the LockModeType is ignored
+ * [HHH-16799] - Using bytecode enhancement may result in incorrect AccessType being picked
+ * [HHH-16784] - @MappedSuperclass with parameterized interface types no longer works
+ * [HHH-16743] - StackOverflowError when loading a ManyToOne with @Proxy(lazy=false)
+ * [HHH-16733] - Doing polymorphic queries with interfaces results in exception SqmRoot not yet resolved to TableGroup
+ * [HHH-16721] - HQL with subquery with entity path at least two levels deep produces wrong SQL
+ * [HHH-16713] - Unexpected warning: "HHH100001: JDBC driver did not return the expected number of row counts"
+ * [HHH-16711] - Reading entities using @MappedSuperclass does not work with classes in foreign packages
+ * [HHH-16701] - "persister" is null during validation with lazy collections
+ * [HHH-16602] - NPE When initializing a lazy collection during flush
+ * [HHH-16589] - In-Clause Parameter Padding mistreats Dialect.getInExpressionCountLimit which can cause ORA-01795: maximum number of expressions in a list is 1000
+ * [HHH-16582] - Alias XXX used for multiple from-clause elements error when executing query with 2 level join
+ * [HHH-16537] - Wrong SQL generated when root join is not used in sub-query
+ * [HHH-16514] - Property sorting can lead to incorrect column mappings with derived embeddable keys
+ * [HHH-16275] - LockTest.testLock*FkTarget 
fail on Sybase with timeout getting lock + * [HHH-14703] - @ElementCollection + nested @Embeddable -> @Enumerated ignored + * [HHH-14078] - Hibernate returns duplicates into @OneToMany collection after merge + +** Improvement + * [HHH-16794] - With Bytecode enhancement a lazy ManyToOne association targeting an Entity annotated with @Proxy(lazy = false) is eagerly loaded + +** Task + * [HHH-16873] - Expose fast-path cloning constructors for UpdateCoordinatorStandard + + +Changes in 6.2.5.Final (June 15, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32158 + +** Bug + * [HHH-16787] - Multi-valued parameter with single element parameter list fails with no such element exception + * [HHH-16786] - NPE in SqmParameterInterpretation when binding null for select item in insert-select statement + * [HHH-16774] - Composite field tracking in enhancement not working if field not annotated with @Embedded + * [HHH-16770] - Entity valued path expansion for GROUP BY clause causes problems for in-subquery predicates + * [HHH-16759] - Merge fails when entity has an Embedded Java record + * [HHH-16750] - ClassCastException when an Entity with an ElementCollection has an EmbeddableId with just one field and Batch is enabled + * [HHH-16745] - TransientObjectException when loading versioned entity from second-level cache with @OneToOne LAZY mapping + * [HHH-16744] - Wrong class created when reusing a join column for different entities in multiple ManyToOne + * [HHH-16740] - Fetching an element collection on an entity with a composite id fails when batch size > 1 + * [HHH-16719] - Upgrading from 6.1.7.Final to 6.2.2.Final breaks Criteria queries for model with inheritance + * [HHH-16714] - Caching does not work properly when hibernate.cache.use_structured_entries is turned on + * [HHH-16697] - Auto type discovery for aggregate functions wrongly 
determines Integer instead of BigDecimal for Oracle
+ * [HHH-16693] - MappedSuperClass with generic collections not mapped correctly
+ * [HHH-16682] - Changes in @JdbcTypeCode(SqlTypes.JSON) are not written to DB
+ * [HHH-16678] - AssertionError in QueryLiteral. when using "update versioned Entity ..." if @Version is a long
+ * [HHH-16673] - Fail to get access lazy fetched field ( @ManyToOne ) which is part of a composite Id (using an @IdClass) when stored in L2 cache
+ * [HHH-16661] - Filters causes NPE when default schema is set
+ * [HHH-16641] - @OrderColumn fails for a generic collection with NPE on XProperty.getMapKey()
+ * [HHH-16640] - Some methods in org.hibernate.cfg.Configuration return `null` instead of `this` for method chaining
+ * [HHH-16614] - Typo in logic handling before/after table creation of auxiliary database objects
+ * [HHH-16612] - NPE on @JdbcTypeCode(SqlTypes.JSON) in combination with explicit @Table name&owner
+ * [HHH-16606] - ClassCastException retrieving byte[] from database
+ * [HHH-16574] - Treat Queries throws SemanticException if InheritanceType=Joined
+ * [HHH-16498] - Hibernate DDL validation fails on enum database column type
+ * [HHH-16490] - The discriminator type mapper overrides any custom type specified for a field mapped over the same column
+ * [HHH-16286] - NullPointerException: Cannot invoke "EntityInitializer.getNavigablePath()" because "firstEntityInitializer" is null
+ * [HHH-16250] - Mapping same column twice and one of them using a custom type leads to "Unknown wrap conversion requested"
+ * [HHH-15929] - Mapping jsonb of different types in a class inheritance hierarchy does not work. 
+ +** Task + * [HHH-16797] - Mark org.hibernate.metamodel as incubating + * [HHH-16741] - Remove method JdbcMappingContainer#getJdbcMappings() + + +Changes in 6.2.4.Final (June 01, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32156 + +** Bug + * [HHH-16725] - Persisting multiple entities with one-many association and composite key with ordered updates enabled fails + * [HHH-16691] - Avoid join table joins for SqmPath in some more scenarios + * [HHH-16690] - Re-saving deleted object leads to constraint violation when unloaded delete happened + * [HHH-16686] - LocalTime to java.sql.Time unwrap bug. + * [HHH-16669] - Batch loading prevents throwing ObjectNotFoundException on initialization of non-existent enhanced entity + * [HHH-16668] - ConcurrentModificationException when using a criteria with window functions in hibernate 6 + * [HHH-16664] - NPE in MappingMetamodelImpl if inheritance is used in IdClass + * [HHH-16658] - Entity name uses not propagated upwards leading to missing joins + * [HHH-16657] - Type filter in subquery wrongly propagates as filter to table group + * [HHH-16656] - H2 1.4.200 lacks registration of ranking window functions + * [HHH-16655] - Parser error for window frames due to wrong rule order + * [HHH-16650] - Loss in precision due usage of Float data type instead of BigDecimal in case Oracle JDBC driver returning -127 for scale and 0 for precision if precision/scale is unknown. 
Might happen unexpectedly and requires very close inspection of used SQL + * [HHH-16649] - Hibernate.remove() doesn't work at all + + * [HHH-16639] - Cannot load entity with EnumType identifier and batch size > 1 + * [HHH-16621] - Fix migration guide for one-to-one uniqueness + * [HHH-16617] - When caching queries, filter parameters are not included in the cache key + * [HHH-16613] - @Lazy @ManyToOne @Cacheable association is retrieved as initialized + * [HHH-16594] - Query cache miss when query contains multiple parameters + * [HHH-16570] - Batch fetch with FetchMode JOIN might lead to multiplication in OneToMany items + * [HHH-16565] - ClassCastException when batch fetch and 2nd level cache are enabled for polymorphic entities + * [HHH-16560] - Nested @Embedded within an @EmbeddedId assertion failure "java.lang.AssertionError: isPartOfKey should have been true in this case" + * [HHH-16559] - Batch fetch does not initialize bi-directional entity in HQL + * [HHH-16558] - Detaching an entity removes natural-id cross-reference from shared cache + * [HHH-16543] - Attribute from MappedSuperClass can't be resolved if number of subclasses is greater than 2 + * [HHH-16540] - UnknownTableReferenceException for entity with Embeddable key-value in map + * [HHH-16532] - Hibernate entity with @Any attribute throws a nullpointer exception when merging + * [HHH-16465] - CTE query cycle attribute evaluated incorrectly on MSSQL + * [HHH-16433] - “Locking with ORDER BY is not supported” error in a “select for update” query (Oracle DB) + * [HHH-16385] - QueryKey not serializable when working with @TenantId + * [HHH-16379] - @DynamicUpdate and 'enableDirtyTracking=true' leads to wrong update statement + * [HHH-16370] - Using MapKey on ManyToMany leads to wrong insert SQL + +** Improvement + * [HHH-16717] - Type pollution fix for ExecutableList having to implement Comparable + * [HHH-16716] - Expose ParameterMarkerStrategy on JDBCServices + * [HHH-16705] - AttributeMappingsList should 
not implement Iterable + * [HHH-16704] - Avoid iterating a LinkedHashMap during ActionQueue processing + * [HHH-16687] - Remove unused internal interface ComparableEntityAction + * [HHH-16685] - Avoid extreme cache misses on EntityPersister#implementsLifecycle() + * [HHH-16683] - Micro optimisations for MappingMetamodelImpl.getEntityDescriptor + * [HHH-16679] - Avoid type pollution problems on iterations of List + * [HHH-16652] - Broken hyperlink in documentation: Hibernate Query Language / Expressions / String concatenation + * [HHH-16626] - JPA hint for Session (EntityManager) level tenant-id + * [HHH-16495] - A query with an non-cross entity join that lacks the on clause should fail with a SemanticQueryException + +** Remove Feature + * [HHH-16677] - Drop JPA static metamodel generation from Gradle plugin + +** Task + * [HHH-16676] - SqmQuerySpec#toHqlString doesn't handle treat expressions + + +Changes in 6.2.3.Final (May 18, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32152 + +** Bug + * [HHH-16576] - Problem in query involving union on MySQL 5.7, MariaDB and Oracle + * [HHH-16573] - Embeddable element collection with updateable = false leads to NullPointerException + * [HHH-16569] - Batch fetch leads to some collections to be wrongly initialized with empty due to wrong generated SQL + * [HHH-16555] - Join fetch query in combination with the inheritance pattern not working in hibernate 6 + * [HHH-16551] - Schema management: discriminator column of type CHAR should always be of length 1 + * [HHH-16549] - NullPointerException when calling LoadQueryInfluencers#isFetchProfileEnabled + * [HHH-16545] - PersistenceUtilHelper.MetadataCache is not Thread-Safe + * [HHH-16542] - Bad get/is handling with bytecode enhancement + * [HHH-16541] - follow-on locking not always handled correctly + * [HHH-16533] - Sybase cast, literal and 
truncation doesn't work with jConnect driver + * [HHH-16517] - H6: Problems with flush when batch is ON (without @DynamicUpdate) + * [HHH-16505] - Interpretation of "bare" left-join alias reference in HQL + * [HHH-16496] - CLONE - Join fetching nested @OneToMany collections causes result multiplication + * [HHH-16491] - SQM could not correctly resolve Generics Embeddable ids + * [HHH-16483] - Recursive CTE using Criteria API will throw `Already registered a copy: org.hibernate.query.sqm.tree.cte.SqmCteStatement` error + * [HHH-16472] - Treated left join with restriction on entity results in incorrect query + * [HHH-16469] - Hibernate repeatedly issues identical SELECT queries to load an optional one-to-one association + * [HHH-16453] - Loading an Entity with two eager collections, produces duplicates when one of the collection is a bag + * [HHH-16447] - Lazy load of collection fails after EntityManager.refresh + * [HHH-16425] - Function type resolver doesn't work with anonymous tuple path sources + * [HHH-16423] - LazyInitializationException when calling em.refresh() on entity with lazy and eager children + * [HHH-16298] - Failure merging a referenced entity + * [HHH-15726] - Disjunction with treat results in too restrictive query + * [HHH-12981] - Join alias is miscalculated when two descendant entities have same attribute name + +** Improvement + * [HHH-16580] - Some typos in org.hibernate.query.sqm + * [HHH-16546] - Reduce verbosity of logs in a few edge cases + * [HHH-16538] - Remove BeanValidationIntegrator#BV_CHECK_CLASS + * [HHH-15871] - Update EDB testing to version 15 + * [HHH-15160] - Extend Postgis support to distance operators + +** Task + * [HHH-16631] - Avoid unnecessary INFO logging at bootstrap + * [HHH-16630] - Refactor code to avoid frequent lookup of Service MutationExecutorService + * [HHH-16625] - Expose enough state from MetadataImpl to allow cloning it + + +Changes in 6.2.2.Final (April 28, 2023) 
+------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32149 + +** Bug + * [HHH-16528] - Revert SybaseDialect NameQualifierSupport to CATALOG only + * [HHH-16507] - Hibernate 6.0+ use the wrong logger name for logging the SQL dialect + * [HHH-16493] - OneToManyCollectionPart doesn't disassemble properly embeddable IdClass + * [HHH-16492] - Hibernate 6 does not auto flush when calling Query.stream() with NativeQuery + * [HHH-16488] - Hibernate ORM 6.2.1 / Wrong select clause generated / OneToOne between the same entity + * [HHH-16485] - Insert ordering doesn't consider root entity names + * [HHH-16479] - Generic enum in @MappedSuperclass fails with java.lang.IllegalArgumentException: Named type [...] did not implement BasicType nor UserType + * [HHH-16477] - LAZY @ManyToOne may again break EAGER @ManyToOne when used on the same entity in a different parent with Bytecode Enhancement + * [HHH-16473] - Join fetch a recursive relationship with EmbeddedId fetches wrong values + * [HHH-16471] - Entities serialized in QueryKey causes java.lang.ClassCastException + * [HHH-16458] - JDBC statement is not closed when DeferredResultSetAccess fails to execute a query + * [HHH-16457] - Values returned in ValueAccess.getValues() and getValue(…) not alphabetical for Java records + * [HHH-16438] - Left outer joins do not work with polymorphic entities due to use of discriminator in the main query condition + * [HHH-16409] - SQLGrammarException when selecting property of grouped join association + * [HHH-16397] - Wrong SQL when HQL has subquery using FK attribute of parent query root + * [HHH-16392] - @Where annotation generates bad query with entity name instead of alias in association subquery + * [HHH-16382] - Self referential association with key-many-to-one fails to load + * [HHH-16366] - UnknownTableReferenceException when selecting entity using IdClass + 
* [HHH-16363] - OneToMany object properties not set when EmbeddedId and it holds an OneToOne relationship to itself + * [HHH-16362] - SemanticException while checking JOIN FETCH owners for nested associations + * [HHH-16347] - The ORDER BY clause in a Window Function doesn't work when using multiple columns + * [HHH-16321] - Hibernate maps NCLOB to ntext on Sybase + * [HHH-16314] - Skip NativeQueryResultTypeAutoDiscoveryTest#dateTimeTypes for Sybase + * [HHH-16304] - unquotedCaseStrategy defaults to UPPER with Sybase + * [HHH-16271] - BulkManipulationTest fails on Sybase with JZ0NK: Generated keys are not available because either the Statement.NO_GENERATED_KEYS was used or no keys were automatically generated. + * [HHH-16270] - Support for null Query parameters on Sybase + * [HHH-16261] - JPA metamodel generator is not considering Java 14's records + * [HHH-15833] - Sybase - store procedure unsupported named parameter + * [HHH-15602] - ByteBuddy enhancement generates faulty code with many-to-many associations + +** Deprecation + * [HHH-16441] - Improve support for @BatchSize + +** Improvement + * [HHH-16511] - Ability to drop-in extra JDBC drivers + * [HHH-16509] - Split parameter limit and IN element limit + * [HHH-16500] - Avoid unnecessary reflection in StreamDecorator + * [HHH-16482] - Convert Functions in StandardConverters to static methods that can be used as method references + * [HHH-16481] - Add a new WrapperArrayHandling for enabled JPA compliance + * [HHH-16468] - Don't create fetch for _identifierMapper anymore + * [HHH-16466] - ARRAY parameter support for multi-key loads + +** Remove Feature + * [HHH-16508] - Remove StreamDecorators + +** Task + * [HHH-16497] - Deprecate JUnit 4 testing annotations + + +Changes in 6.2.1.Final (April 14, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32147 + +** Bug + * [HHH-16459] - 
Bytecode-enhanced inline dirty tracking ignores generic associations from mapped superclasses + * [HHH-16429] - WF SessionFactoryTestCase test fails + * [HHH-16413] - Subquery with multiple left joins broken in 6.2.0 + * [HHH-16394] - Statement Batch + Version + Dirty Collection leads to OptimisticLockException: Batch update returned unexpected row count from update + * [HHH-16387] - Entity in key not returned when querying + * [HHH-16352] - DynamicUpdate / DynamicInsert with batching leads to parameter binding exception + * [HHH-16349] - ORA-00979 not a group by expression when using a polymorphic entity in HQL group by + * [HHH-16340] - Duplicate column exception when mapping discriminator column inside an EmbeddedId + +** Improvement + * [HHH-16414] - Improve table group resolution for subqueries using the same alias as parent + +** Task + * [HHH-16389] - Introduce annotations for nullness marking/checking + + +Changes in 6.2.0.Final (March 31, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32144 + +** Bug + * [HHH-16404] - SkipFirstLimitHandler calls insertAfterSelect with parameter in wrong order + * [HHH-16402] - Fix default window frame mode to be RANGE instead of ROWS + * [HHH-16396] - HQL with SubQuery having same alias of root Query generates wrong SQL + * [HHH-16391] - Incorrect mutability-plan resolution for converted collection-as-basic mappings + * [HHH-16390] - Execution of non-batched statements do not force execution of current batch + * [HHH-16378] - SQM fails to resolve target type of association defined in mappersuperclass with generics + * [HHH-16372] - NPE in EmbeddedIdentifierMappingImpl#disassemble + * [HHH-16368] - UnsupportedOperationException: compare() not implemented for EntityType + * [HHH-16359] - SingularAttribute with non-nullable column is wrongly reported as optional + * [HHH-16358] - OneToMany 
with abstract TABLE_PER_CLASS element not working + * [HHH-16355] - Map association with entity typed key breaks if mappedBy is specified + * [HHH-16350] - PreLoadEvent listeners always receive PreLoadEvent with null state + * [HHH-16337] - EntityCollection is not deleted when replaced + * [HHH-16336] - Modify the newly introduced mappedBy type check to be more lenient + * [HHH-16334] - Persisting and loading an entity with a null all-delete-orphan collection results in error + * [HHH-16333] - Handle converters properly in BasicPluralType + * [HHH-16305] - Error when using parameter as both a select function argument and in where clause + * [HHH-16295] - JDBCUpdateMutation with MutationType.INSERT instead of MutationType.UPDATE + * [HHH-16281] - Inconsistent Behavior of L2 cache between Hibernate 5 and 6 + * [HHH-16274] - Incorrect foreign key column order for @OneToMany with multiple join columns and @IdClass + * [HHH-16272] - Hibernate 6 fails to refresh when collections are accessed in entity setter method when access type is property + * [HHH-16255] - Logging "Executing import script" when it's not being executed + * [HHH-16249] - StatelessSession does not flush when using jdbc batch_size > 1 + * [HHH-16248] - Problem with batching and inheritance with @ManyToOne mapped in multiple subclasses + * [HHH-16241] - Unable to write/query attribute on type with AttributeConverter + * [HHH-16218] - Natural id cache is extremely slow for entities with compound natural id + +** Deprecation + * [HHH-16407] - EntityPersister and CollectionPersister deprecations + +** Improvement + * [HHH-16380] - Avoid map-based mapping metamodel access when instantiating entity instances + * [HHH-16348] - Re-add JdbcOperationQuery#getAppliedParameters for query extensions + * [HHH-16339] - Unify entity and any discriminator handling + * [HHH-16317] - InstantAsTimestampWithTimeZoneJdbcType doesn't work for timestamp on PG-JDBC and MSSQL + * [HHH-16287] - Consider 
hibernate.timezone.default_storage for OffsetTime typing and storage + +** New Feature + * [HHH-16353] - Make it possible for Hibernate Reactive to override the creation of AttributeMapping and Fetch strategies + * [HHH-16097] - Move all tests from documentation into hibernate-core + +** Task + * [HHH-16400] - Avoid reflection for calls to StackWalker + * [HHH-16388] - Configuration setting for wrapper Byte[]/Character[] treatment + * [HHH-16386] - Disable batching for dynamic-insert and dynamic-update + + +Changes in 6.2.0 (March 31, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32144 + +** Bug + * [HHH-16404] - SkipFirstLimitHandler calls insertAfterSelect with parameter in wrong order + * [HHH-16402] - Fix default window frame mode to be RANGE instead of ROWS + * [HHH-16396] - HQL with SubQuery having same alias of root Query generates wrong SQL + * [HHH-16391] - Incorrect mutability-plan resolution for converted collection-as-basic mappings + * [HHH-16390] - Execution of non-batched statements do not force execution of current batch + * [HHH-16378] - SQM fails to resolve target type of association defined in mappersuperclass with generics + * [HHH-16372] - NPE in EmbeddedIdentifierMappingImpl#disassemble + * [HHH-16368] - UnsupportedOperationException: compare() not implemented for EntityType + * [HHH-16359] - SingularAttribute with non-nullable column is wrongly reported as optional + * [HHH-16358] - OneToMany with abstract TABLE_PER_CLASS element not working + * [HHH-16355] - Map association with entity typed key breaks if mappedBy is specified + * [HHH-16350] - PreLoadEvent listeners always receive PreLoadEvent with null state + * [HHH-16337] - EntityCollection is not deleted when replaced + * [HHH-16336] - Modify the newly introduced mappedBy type check to be more lenient + * [HHH-16334] - Persisting and loading an 
entity with a null all-delete-orphan collection results in error + * [HHH-16333] - Handle converters properly in BasicPluralType + * [HHH-16305] - Error when using parameter as both a select function argument and in where clause + * [HHH-16295] - JDBCUpdateMutation with MutationType.INSERT instead of MutationType.UPDATE + * [HHH-16281] - Inconsistent Behaivor of L2 cache between Hibernate 5 and 6 + * [HHH-16274] - Incorrect foreign key column order for @OneToMany with multiple join columns and @IdClass + * [HHH-16272] - Hibernate 6 fails to refresh when collections are accessed in entity setter method when access type is property + * [HHH-16255] - Logging "Executing import script" when it's not being executed + * [HHH-16249] - StatelessSession does not flush when using jdbc batch_size > 1 + * [HHH-16248] - Problem with batching and inheritance with @ManyToOne mapped in multiple subclasses + * [HHH-16241] - Unable to write/query attribute on type with AttributeConverter + * [HHH-16218] - Natural id cache is extremely slow for entities with compound natural id + +** Deprecation + * [HHH-16407] - EntityPersister and CollectionPersister deprecations + +** Improvement + * [HHH-16380] - Avoid map-based mapping metamodel access when instantiating entity instances + * [HHH-16348] - Re-add JdbcOperationQuery#getAppliedParameters for query extensions + * [HHH-16339] - Unify entity and any discriminator handling + * [HHH-16317] - InstantAsTimestampWithTimeZoneJdbcType doesn't work for timestamp on PG-JDBC and MSSQL + * [HHH-16287] - Consider hibernate.timezone.default_storage for OffsetTime typing and storage + +** New Feature + * [HHH-16353] - Make it possible for Hibernate Reactive to override the creation of AttributeMapping and Fetch strategies + * [HHH-16097] - Move all tests from documentation into hibernate-core + +** Task + * [HHH-16400] - Avoid reflection for calls to StackWalker + * [HHH-16388] - Configuration setting for wrapper Byte[]/Character[] treatment + * 
[HHH-16386] - Disable batching for dynamic-insert and dynamic-update + + +Changes in 6.2.0.CR4 (March 17, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32142 + +** Bug + * [HHH-16322] - Merge of entities having a collection with orphanRemoval true fails when bytecode enhancement is enabled + * [HHH-16310] - Fix regressions in documentation and support for MultiTenantConnectionProvider + * [HHH-16297] - Removing an element from a collection of elements removes the whole collection + * [HHH-16280] - Fix Jackson XML mapper support for Oracle Array data types + * [HHH-16279] - Hibernate ORM with hibernate.hbm2ddl.import_files_sql_extractor = multi-line fails to process import.sql containing only comments + * [HHH-16263] - Unable to Join Tables with Superclasses - JoinColumn Occours Out of Order + * [HHH-16258] - NPE with SubselectFetch and inheritance + * [HHH-16252] - SQL syntax error on drop constraint + * [HHH-16247] - Criteria Query with Object-Typed Embedded Parameter throws AssertionError + * [HHH-16240] - ClassCastException when inserting an Entity with @CreationTimestamp in a StatelessSession + * [HHH-16238] - Add support for multiple generic embeddable properties in MappedSuperclass + * [HHH-16237] - versionUpdateGroup is null for Enity extending another Entity when only ElementCollection is updated. 
+ * [HHH-16234] - NPE when enabling DEBUG on orm.results.loading in Hibernate 6.1.7 + * [HHH-16215] - Composite primary key @IdClass attribute mapping is borrowed from the first OneToMany backref and cannot be set + * [HHH-16213] - Right join wrongly added to subquery when using FetchMode.SUBSELECT and is null predicate + * [HHH-16211] - Error with like predicate's escape literal and converted char array property + * [HHH-16210] - Join fetching nested @OneToMany collections causes result multiplication + * [HHH-16191] - Since Hibernate ORM 6.x the FetchType.EAGER called from native queries raises error + * [HHH-16189] - Hibernate 6, wrong order by and group by generated + * [HHH-16188] - Exception when find by field of generic @EmbeddedId with @MappedSuperclass + * [HHH-16180] - AssertionError when using using native query on table with InheritanceStrategy.JOINED + * [HHH-16179] - Session.find should not apply filters + * [HHH-16166] - ClassCastException, mixing mapping types for same field name on different entities + * [HHH-16126] - TransientObjectException when loading versioned entity from second-level cache + * [HHH-16019] - @Where not consistently applied across association boundaries + * [HHH-16015] - Merge operation throws a NPE: Cannot invoke org.hibernate.property.access.spi.Setter.set(Object, Object) when using CompositeUserType + * [HHH-15972] - entitygraph load error when Inheritance JOINED is used in 6.1.6 + * [HHH-15885] - Hibernate 6 OrderBy in the wrong orders + * [HHH-15802] - SubQuery with "in" results in java.lang.ClassCastException: class org.hibernate.metamodel.mapping.internal.BasicEntityIdentifierMappingImpl cannot be cast to class org.hibernate.metamodel.mapping.EntityValuedModelPart + * [HHH-15766] - UNION query doesn't preserve the original parenthesis, and so it fails on PostgreSQL + * [HHH-15664] - Documentation states that native database array types are unsupported + * [HHH-15108] - AggregateClassLoader.findClass() drops all 
exceptions + * [HHH-13627] - Updated items do not get invalidated when cachemode is set to CacheMode.GET + +** Deprecation + * [HHH-16264] - Deprecate `hibernate.use_entity_where_clause_for_collections` + +** Improvement + * [HHH-16323] - Ensure new service ParameterMarkerStrategy can be looked up efficiently + * [HHH-16320] - Support H2's native json DDL type using custom 'format json' write expressions + * [HHH-16313] - Throw an appropriate error when a mappedBy property references the wrong entity type + * [HHH-16311] - Migrate away from UserType for enum handling + * [HHH-16307] - Finish Gradle plugin DSL + * [HHH-16290] - Mark `o.h.persister.entity` and `o.h.persister.collection` as internal + * [HHH-16284] - Rename JdbcParameterRender to ParameterMarkerStrategy + * [HHH-16276] - More readable exception for non-compliant @OrderBy expressions + * [HHH-16273] - Support for Dialect native ParameterMarkerStrategy + * [HHH-16265] - Remove `@Where#applyInToManyFetch` + * [HHH-16257] - Add `@JavaServiceLoadable` to document Services or strategies that are loadable as Java services + * [HHH-16256] - JdbcParameterRenderer to have an impact on write operations + * [HHH-16198] - Fix splitting of SqlAstCreationState + +** New Feature + * [HHH-16282] - Make it possible for Hibernate Reactive to plug in some custom initializers + * [HHH-16260] - JdbcParameterRenderer not called with dynamic filters + +** Task + * [HHH-16330] - Various micro upgrades of Jakarta EE10 APIs and references used by integration tests + + +Changes in 6.2.0.CR3 (March 01, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32132 + +** Bug + * [HHH-16233] - Bad error for mutable non-root entity + * [HHH-16230] - Wrong data in self-reference using L2C and BatchSize + * [HHH-16217] - Documentation for chapter 34 is being misrendered + * [HHH-16209] - Identically-named 
association in entity root and embeddable leads to mixup during association loading + * [HHH-16202] - @OrderBy not applied to collections when @Fetch(value = FetchMode.SUBSELECT) + * [HHH-16199] - org.hibernate.AssertionFailure: force initializing collection loading + * [HHH-16197] - Circular references of the same entity result in different Java objects when caching is enabled and using a query + * [HHH-16195] - Hibernate 6 fails to bind appropriate type for mapped superclass with generic attribute + * [HHH-16193] - LazyInitializationException when accessing loaded reference after session is closed using bytecode enhancement + * [HHH-16184] - Two queries are execute to initialize empty collections + * [HHH-16182] - Converted boolean mappings not always working in predicates + * [HHH-16177] - Catalog/schema creation/dropping sometimes ignores default catalog/schema + * [HHH-16175] - AssertionError in StandardEntityGraphTraversalStateImpl.traverse when using entity graph + * [HHH-16169] - NullPointerException when merging detached collections due to LOG.debugf + * [HHH-16165] - Incorrect SQL generated when using SELECT DISTINCT and @OrderBy + * [HHH-16157] - Query with join fetch on collection of entities with @DiscriminatorValue causes duplicate condition + * [HHH-16155] - Problem with @Generated and jdbc.batch_size + * [HHH-16151] - NPE SqmTreePrinter.visitLikePredicate getEscapeCharacter is null + * [HHH-16150] - Hibernate ORM no longer drops the schema when using the create-drop strategy and a session factory observer throws an exception on startup + * [HHH-16137] - Same named parameter appearing in SELECT and WHERE clauses throws JDBC parameter value not bound error + * [HHH-16136] - LAZY @ManyToOne may break EAGER @ManyToOne when used on the same entity in a different parent with Bytecode Enhancement + * [HHH-16131] - Date calculation errors in Oracle and TiDB + * [HHH-16123] - Invalid generated SQL query when accessing join from a treated abstract superclass 
path + * [HHH-16122] - Named type [class xxx] did not implement BasicType nor UserType + * [HHH-16119] - Named native queries do not work with jakarta.persistence.Tuple result class + * [HHH-16113] - Add version checks for MERGE support to dialects + * [HHH-16112] - Using hibernate.jdbc.batch_size configuration causes BatchedTooManyRowsAffectedException for OneToMany relationship with orphan removal + * [HHH-16109] - createNamedQuery adds offset and limit to all queries + * [HHH-16081] - Converted collection-as-basic values are considered immutable + * [HHH-16080] - UnknownTableReferenceException with JPQL query of Entity with unidirectional OneToOne mapping + * [HHH-16053] - join fetch in @Query has no effect for nested entities with @DiscriminatorValue (SINGLE_TABLE inheritance) + * [HHH-16009] - jakarta @OrderBy annotation generates a non-transformed column name in the SQL query for a composite key's field + * [HHH-15990] - Unable to determine TableReference when associate ManyToOne fetch lazy and NotFound IGNORE: issue using Projection + * [HHH-15964] - Incorrect results for pageable EntityGraph with Hibernate 6.1.6 + * [HHH-15895] - IllegalArgumentException :Cannot create binding for parameter reference with criteria builder + * [HHH-15875] - Join fetch doesn't work with @EmbeddedId associations in Hibernate 6 + * [HHH-14514] - Auto evict collection cache not work when use transactional cache concurrency strategy + +** Improvement + * [HHH-16243] - Change scope of AbstractEntityPersister#generateNonIdAttributeMapping + * [HHH-16242] - Change scopes of methods in AbstractEntityInitializer class + * [HHH-16229] - Consider pluggability for rendering "JDBC" parameters + * [HHH-16227] - Introduce SqmMultiTableMutationStrategyProvider + * [HHH-16226] - Introduce JdbcValuesMappingProducerProvider + * [HHH-16214] - Improve memory and access efficiency in SqmFunctionRegistry + * [HHH-16185] - Standardize the date_trunc() function, emulating it for Dialects that 
don't support native versions + * [HHH-16183] - Change scopes of private methods in InsertCoordinator + * [HHH-16174] - allow 'extract(epoch from ...)' in HQL + * [HHH-16172] - Expose internal state of 2nd-level cache keys + * [HHH-16170] - Support literal enum simple names in HQL 'set' clause. + * [HHH-16133] - Allow @TenantId annotation on @Embedded + * [HHH-15870] - allow SchemaMigrator to update column types + + +Changes in 6.2.0.CR2 (January 27, 2023) +------------------------------------------------------------------------------------------------------------------------ + +https://hibernate.atlassian.net/projects/HHH/versions/32128 + +** Bug + * [HHH-16108] - NullPointerException when flushing a (simple) entity update for models with bytecode enhancement and multiple one-to-one associations (some lazy) + * [HHH-16106] - Using BatchEntitySelectFetchInitializer causes PostLoad to be called before references are initialized + * [HHH-16099] - Log about immutable properties only if dirty on update + * [HHH-16096] - Passing an ExtendedBeanManager which is notified too late leads to initialization error + * [HHH-16077] - Added named native queries cannot specify result-class + * [HHH-16070] - Exception when find by association id that is a generic @EmbeddedId with @MappedSuperclass + * [HHH-16069] - Skip CDI for Hibernate extensions by default + * [HHH-16062] - jakarta.persistence.query.timeout not working on Hibernate 6.1.6 for Criteria Queries + * [HHH-16061] - SqmDynamicInstantiation warns about dynamic Map instantiation when using an entity + * [HHH-16049] - Setting a property to its current value with bytecode enhancement enabled results in unnecessary SQL Update in some (many) cases + * [HHH-16045] - ambiguity in grammar of HQL datetime literals + * [HHH-16043] - Hibernate 6.x breaks collection batch fetching + * [HHH-16039] - Stream fails to fetch object during processing where BatchEntitySelectFetchInitializer gets used + * [HHH-16036] - Fix Oracle CI 
parameter STATISTICS_LEVEL + * [HHH-16035] - Duration literals and 'by' are almost completely broken + * [HHH-16033] - Many-to-Many inverse mapping referencing the same class uses pk instead of fk field for removal + * [HHH-16031] - @ManyToMany with @JoinTable(inverseColumn = ...) and SortedSet may results in data loss + * [HHH-16025] - Using BatchEntitySelectFetchInitializer with caching leads to caching wrong values + * [HHH-16023] - ArrayIndexOutOfBoundsException: Index 1 out of bounds for length 1 for NamedNativeQuery with generics + * [HHH-16020] - Hibernate doesn't set the correct bind parameter for the offset when using NativeQuery.setFirstResult() on Postgresql + * [HHH-16014] - Querying property from treated path causes IllegalArgumentException: Already registered a copy + * [HHH-16010] - DefaultEvictEventListener calls handleEviction with swapped parameters + * [HHH-16007] - PropertyAccessException when deleting entities with Embeddables with child collections + * [HHH-16005] - @BatchSize with @ManyToMany does not work as expected when migrating 5 -> 6 + * [HHH-16004] - Downcasting with CriteriaBuilder.treat causes ClassCastException + * [HHH-15986] - Eager Bidirectional association, initializing an HibernateProxy should associate to the circular association the HibernateProxy itself + * [HHH-15982] - Bidirectional one-to-one associations produce child entities with null parent entity references + * [HHH-15967] - @OneToOne(mappedBy = ..., fetch = LAZY) in embedded referencing an association within another embedded + * [HHH-15966] - ElementCollection with nested Embeddables fails with ArrayIndexOutOfBoundsException + * [HHH-15950] - AssertionError with bidirectional OneToOne relation using AttributeConverter for the FK + * [HHH-15934] - @Basic(optional=false) has no effect + * [HHH-15933] - broken SQL generated for @ManyToOne with @JoinColumn which references a column of a @SecondaryTable + * [HHH-15928] - Distinct with maxResults fails under SQLServer + * 
[HHH-15921] - @BatchSize and @IdClass on join column throws exception + * [HHH-15902] - @OneToMany relationship with @Where on child table generates wrong sql + * [HHH-15890] - springboot 3.0.0 + hibernate 6.1.5.Final + IBM DB2 error after migrating from springboot 2.7.0 + hibernate 5.6.9.Final + * [HHH-15888] - review exception reporting in Column.getSqlType()/getSqlTypeName() + * [HHH-15866] - Hibernate validation fails when OneToMany refers to fereign key in embeddable object and is marked as nullable false + * [HHH-15865] - OneToMany foreign key relation throws when id is inside nested embeddables + * [HHH-15864] - OrphanRemoval does not work with embeddables when deleting entity + * [HHH-15854] - Improve CollectionInitializer and EntityDelayedFetchInitializer resolveInstance methods performance when the parent entity is initialized + * [HHH-15851] - Mixup of entities in refresh with BatchSize + * [HHH-15839] - CriteriaBuilder treat method on Path causes ClassCastException + * [HHH-15822] - Unexpected org.hibernate.UnknownEntityTypeException: Unable to locate persister + * [HHH-15794] - NullPointerException when constructing mapping model for nested embeddables with not optional ManyToOne + * [HHH-15617] - Fix Documentation for direct fetching with Filter + * [HHH-15604] - Identically-named association in entity root and elementcollection of embeddables leads to assertion error + * [HHH-15372] - Static metamodel generator references version 2.1 + * [HHH-14526] - Problem with InheritanceType.JOINED without own subtable + * [HHH-14338] - HSQLDialect relies on "MODULE"-Prefix for local temporary table creation but MODULE-Prefix has been dropped in HSQLDB Version 2.5.1 + + Changes in 6.2.0.CR1 (December 22, 2022) ------------------------------------------------------------------------------------------------------------------------ diff --git a/checkerstubs/jakarta.persistence.astub b/checkerstubs/jakarta.persistence.astub new file mode 100644 index 
000000000000..55415ca0155e --- /dev/null +++ b/checkerstubs/jakarta.persistence.astub @@ -0,0 +1,166 @@ +// Checkerframework stubs for the jakarta.persistence module + +package jakarta.persistence; + +import org.checkerframework.checker.nullness.qual.Nullable; + +public interface AttributeConverter { + public @Nullable Y convertToDatabaseColumn(@Nullable X attribute); + public @Nullable X convertToEntityAttribute(@Nullable Y dbData); +} +public interface EntityManager extends AutoCloseable { + public @Nullable T find(Class entityClass, Object primaryKey); + public @Nullable T find(Class entityClass, Object primaryKey, Map properties); + public @Nullable T find(Class entityClass, Object primaryKey, LockModeType lockMode); + public @Nullable T find(Class entityClass, Object primaryKey, LockModeType lockMode, Map properties); +} +public interface EntityManagerFactory extends AutoCloseable { + public @Nullable Cache getCache(); +} +public interface Parameter { + public @Nullable String getName(); + public @Nullable Integer getPosition(); +} +public interface PersistenceUnitUtil extends PersistenceUtil { + public @Nullable Object getIdentifier(Object entity); +} +public interface Query { + Query setParameter(Parameter param, @Nullable T value); + Query setParameter(Parameter param, @Nullable Calendar value, TemporalType temporalType); + Query setParameter(Parameter param, @Nullable Date value, TemporalType temporalType); + Query setParameter(String name, @Nullable Object value); + Query setParameter(String name, @Nullable Calendar value, TemporalType temporalType); + Query setParameter(String name, @Nullable Date value, TemporalType temporalType); + Query setParameter(int position, @Nullable Object value); + Query setParameter(int position, @Nullable Calendar value, TemporalType temporalType); + Query setParameter(int position, @Nullable Date value, TemporalType temporalType); + @Nullable T getParameterValue(Parameter param); + @Nullable Object getParameterValue(String 
name); + @Nullable Object getParameterValue(int position); +} +public interface StoredProcedureQuery extends Query { + StoredProcedureQuery setParameter(Parameter param, @Nullable T value); + StoredProcedureQuery setParameter(Parameter param, @Nullable Calendar value, TemporalType temporalType); + StoredProcedureQuery setParameter(Parameter param, @Nullable Date value, TemporalType temporalType); + StoredProcedureQuery setParameter(String name, @Nullable Object value); + StoredProcedureQuery setParameter(String name, @Nullable Calendar value, TemporalType temporalType); + StoredProcedureQuery setParameter(String name, @Nullable Date value, TemporalType temporalType); + StoredProcedureQuery setParameter(int position, @Nullable Object value); + StoredProcedureQuery setParameter(int position, @Nullable Calendar value, TemporalType temporalType); + StoredProcedureQuery setParameter(int position, @Nullable Date value, TemporalType temporalType); + @Nullable Object getOutputParameterValue(int position); + @Nullable Object getOutputParameterValue(String parameterName); +} +public interface TypedQuery extends Query { + TypedQuery setParameter(Parameter param, @Nullable T value); + TypedQuery setParameter(Parameter param, @Nullable Calendar value, TemporalType temporalType); + TypedQuery setParameter(Parameter param, @Nullable Date value, TemporalType temporalType); + TypedQuery setParameter(String name, @Nullable Object value); + TypedQuery setParameter(String name, @Nullable Calendar value, TemporalType temporalType); + TypedQuery setParameter(String name, @Nullable Date value, TemporalType temporalType); + TypedQuery setParameter(int position, @Nullable Object value); + TypedQuery setParameter(int position, @Nullable Calendar value, TemporalType temporalType); + TypedQuery setParameter(int position, @Nullable Date value, TemporalType temporalType); + @Nullable Object getOutputParameterValue(int position); + @Nullable Object getOutputParameterValue(String parameterName); 
+} +public interface Tuple { + @Nullable X get(TupleElement tupleElement); + @Nullable X get(String alias, Class type); + @Nullable Object get(String alias); + @Nullable X get(int i, Class type); + @Nullable Object get(int i); + @Nullable Object[] toArray(); +} +public interface TupleElement { + @Nullable String getAlias(); +} + +package jakarta.persistence.criteria; + +public interface CommonAbstractCriteria { + @Nullable Predicate getRestriction(); +} +public interface AbstractQuery extends CommonAbstractCriteria { + AbstractQuery where(@Nullable Expression restriction); + AbstractQuery where(@Nullable Predicate... restrictions); + AbstractQuery having(@Nullable Expression restriction); + AbstractQuery having(@Nullable Predicate... restrictions); + @Nullable Selection getSelection(); + @Nullable Predicate getGroupRestriction(); +} +public interface CriteriaUpdate extends CommonAbstractCriteria { + CriteriaUpdate set(SingularAttribute attribute, @Nullable X value); + CriteriaUpdate set(Path attribute, @Nullable X value); + CriteriaUpdate set(String attributeName, @Nullable Object value); +} +public interface Subquery extends AbstractQuery, Expression { + Subquery where(@Nullable Expression restriction); + Subquery where(@Nullable Predicate... restrictions); + Subquery having(@Nullable Expression restriction); + Subquery having(@Nullable Predicate... restrictions); + @Nullable Expression getSelection(); +} +public interface CriteriaBuilder { + public static interface SimpleCase extends Expression { + SimpleCase when(C condition, @Nullable R result); + SimpleCase when(Expression condition, @Nullable R result); + Expression otherwise(@Nullable R result); + } + public static interface Case extends Expression { + Case when(Expression condition, @Nullable R result); + Expression otherwise(@Nullable R result); + } +} +public interface Join extends From { + Join on(@Nullable Expression restriction); + Join on(@Nullable Predicate... 
restrictions); + @Nullable Predicate getOn(); +} +public interface SetJoin extends PluralJoin, E> { + SetJoin on(@Nullable Expression restriction); + SetJoin on(@Nullable Predicate... restrictions); +} +public interface ListJoin extends PluralJoin, E> { + ListJoin on(@Nullable Expression restriction); + ListJoin on(@Nullable Predicate... restrictions); +} +public interface MapJoin extends PluralJoin, V> { + MapJoin on(@Nullable Expression restriction); + MapJoin on(@Nullable Predicate... restrictions); +} +public interface Path extends Expression { + // CteRoot etc. + @Nullable Bindable getModel(); + @Nullable Path getParentPath(); + MapJoin on(@Nullable Predicate... restrictions); +} + +package jakarta.persistence.metamodel; + +public interface IdentifiableType extends ManagedType { + @Nullable IdentifiableType getSupertype(); +} + +package jakarta.persistence.spi; + +public interface ClassTransformer { + @Nullable byte[] transform( + @Nullable ClassLoader loader, + String className, + @Nullable Class classBeingRedefined, + ProtectionDomain protectionDomain, + byte[] classfileBuffer) throws TransformerException; +} +public interface PersistenceProvider { + public @Nullable EntityManagerFactory createEntityManagerFactory(String emName, @Nullable Map map); + public EntityManagerFactory createContainerEntityManagerFactory(PersistenceUnitInfo info, @Nullable Map map); +} +public interface PersistenceUnitInfo { + public @Nullable String getPersistenceProviderClassName(); + public @Nullable PersistenceUnitTransactionType getTransactionType(); + public @Nullable DataSource getJtaDataSource(); + public @Nullable DataSource getNonJtaDataSource(); + public @Nullable ClassLoader getClassLoader(); + public @Nullable ClassLoader getNewTempClassLoader(); +} \ No newline at end of file diff --git a/ci/build.sh b/ci/build.sh index f83866aade06..12d76976f487 100755 --- a/ci/build.sh +++ b/ci/build.sh @@ -19,10 +19,46 @@ elif [ "$RDBMS" == "postgresql" ] || [ "$RDBMS" == 
"postgresql_10" ]; then elif [ "$RDBMS" == "edb" ] || [ "$RDBMS" == "edb_10" ]; then goal="-Pdb=edb_ci -DdbHost=localhost:5444" elif [ "$RDBMS" == "oracle" ]; then + goal="-Pdb=oracle_ci" +elif [ "$RDBMS" == "oracle_xe" ]; then # I have no idea why, but these tests don't seem to work on CI... - goal="-Pdb=oracle_ci -PexcludeTests=**.LockTest.testQueryTimeout*" -elif [ "$RDBMS" == "oracle_11_2" ]; then + goal="-Pdb=oracle_xe_ci" +elif [ "$RDBMS" == "oracle_atps_tls" ]; then + echo "Managing Oracle Autonomous Database..." + export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=autonomous&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') + # I have no idea why, but these tests don't seem to work on CI... + goal="-Pdb=oracle_cloud_autonomous_tls -DrunID=$RUNID -DdbHost=$HOST -DdbService=$SERVICE" +elif [ "$RDBMS" == "oracle_atps" ]; then + echo "Managing Oracle Autonomous Database..." + export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=autonomous2&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') + # I have no idea why, but these tests don't seem to work on CI... + goal="-Pdb=oracle_cloud_autonomous -DrunID=$RUNID -DdbHost=$HOST -DdbService=$SERVICE" +elif [ "$RDBMS" == "oracle_db19c" ]; then + echo "Managing Oracle Database 19c..." 
+ export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=db19c&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') # I have no idea why, but these tests don't seem to work on CI... + goal="-Pdb=oracle_cloud_db19c -DrunID=$RUNID -DdbHost=$HOST -DdbService=$SERVICE" +elif [ "$RDBMS" == "oracle_db21c" ]; then + echo "Managing Oracle Database 21c..." + export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=db21c&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') + # I have no idea why, but these tests don't seem to work on CI... + goal="-Pdb=oracle_cloud_db21c -DrunID=$RUNID -DdbHost=$HOST -DdbService=$SERVICE" +elif [ "$RDBMS" == "oracle_db23c" ]; then + echo "Managing Oracle Database 23c..." + export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=db23c&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') + # I have no idea why, but these tests don't seem to work on CI... 
+ goal="-Pdb=oracle_cloud_db23c -DrunID=$RUNID -DdbHost=$HOST -DdbService=$SERVICE" +elif [ "$RDBMS" == "oracle_11_2" ]; then goal="-Pdb=oracle_legacy_ci -PexcludeTests=**.LockTest.testQueryTimeout*" elif [ "$RDBMS" == "db2" ]; then goal="-Pdb=db2_ci" @@ -32,6 +68,8 @@ elif [ "$RDBMS" == "mssql" ] || [ "$RDBMS" == "mssql_2017" ]; then goal="-Pdb=mssql_ci" elif [ "$RDBMS" == "sybase" ]; then goal="-Pdb=sybase_ci" +elif [ "$RDBMS" == "sybase_jconn" ]; then + goal="-Pdb=sybase_jconn_ci" elif [ "$RDBMS" == "tidb" ]; then goal="-Pdb=tidb" elif [ "$RDBMS" == "hana_cloud" ]; then diff --git a/ci/database-start.sh b/ci/database-start.sh index a1baf2444b0e..433bd1f420e1 100755 --- a/ci/database-start.sh +++ b/ci/database-start.sh @@ -14,6 +14,14 @@ elif [ "$RDBMS" == 'db2' ]; then bash $DIR/../docker_db.sh db2 elif [ "$RDBMS" == 'oracle' ]; then bash $DIR/../docker_db.sh oracle +elif [ "$RDBMS" == 'oracle_atps' ]; then + bash $DIR/../docker_db.sh oracle_atps +elif [ "$RDBMS" == 'oracle_db19c' ]; then + bash $DIR/../docker_db.sh oracle_db19c +elif [ "$RDBMS" == 'oracle_db21c' ]; then + bash $DIR/../docker_db.sh oracle_db21c +elif [ "$RDBMS" == 'oracle_db23c' ]; then + bash $DIR/../docker_db.sh oracle_db23c elif [ "$RDBMS" == 'mssql' ]; then bash $DIR/../docker_db.sh mssql elif [ "$RDBMS" == 'sybase' ]; then diff --git a/ci/jpa-3.1-tck.Jenkinsfile b/ci/jpa-3.1-tck.Jenkinsfile index 246df6d066ab..0c01b1ab9d55 100644 --- a/ci/jpa-3.1-tck.Jenkinsfile +++ b/ci/jpa-3.1-tck.Jenkinsfile @@ -1,10 +1,16 @@ -@Library('hibernate-jenkins-pipeline-helpers@1.5') _ +@Library('hibernate-jenkins-pipeline-helpers') _ // Avoid running the pipeline on branch indexing if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { - print "INFO: Build skipped due to trigger being Branch Indexing" - currentBuild.result = 'ABORTED' - return + print "INFO: Build skipped due to trigger being Branch Indexing" + currentBuild.result = 'NOT_BUILT' + return +} +// This is a limited 
maintenance branch, so don't run this on pushes to the branch, only on PRs +if ( !env.CHANGE_ID ) { + print "INFO: Build skipped because this job should only run for pull request, not for branch pushes" + currentBuild.result = 'NOT_BUILT' + return } pipeline { @@ -15,14 +21,13 @@ pipeline { jdk 'OpenJDK 11 Latest' } options { - rateLimitBuilds(throttle: [count: 1, durationName: 'day', userBoost: true]) buildDiscarder(logRotator(numToKeepStr: '3', artifactNumToKeepStr: '3')) disableConcurrentBuilds(abortPrevious: true) } parameters { choice(name: 'IMAGE_JDK', choices: ['jdk11'], description: 'The JDK base image version to use for the TCK image.') - string(name: 'TCK_VERSION', defaultValue: '3.1.1', description: 'The version of the Jakarta JPA TCK i.e. `2.2.0` or `3.0.1`') - string(name: 'TCK_SHA', defaultValue: 'b954b39440b331eb4584187d2d8245f82c4d2aa8b02d2e04bd42498a5751312b', description: 'The SHA256 of the Jakarta JPA TCK that is distributed under https://download.eclipse.org/jakartaee/persistence/3.1/jakarta-persistence-tck-${TCK_VERSION}.zip.sha256') + string(name: 'TCK_VERSION', defaultValue: '3.1.2', description: 'The version of the Jakarta JPA TCK i.e. 
`2.2.0` or `3.0.1`') + string(name: 'TCK_SHA', defaultValue: '618a9fcdb0f897cda71227ed57d035ae1dc40fc392318809a734ffc6968e43ff', description: 'The SHA256 of the Jakarta JPA TCK that is distributed under https://download.eclipse.org/jakartaee/persistence/3.1/jakarta-persistence-tck-${TCK_VERSION}.zip.sha256') booleanParam(name: 'NO_SLEEP', defaultValue: true, description: 'Whether the NO_SLEEP patch should be applied to speed up the TCK execution') } stages { @@ -35,7 +40,7 @@ pipeline { } dir('hibernate') { checkout scm - sh './gradlew publishToMavenLocal -DjakartaJpaVersion=3.1.0' + sh './gradlew publishToMavenLocal -PmavenMirror=nexus-load-balancer-c4cf05fd92f43ef8.elb.us-east-1.amazonaws.com -DjakartaJpaVersion=3.1.0' script { env.HIBERNATE_VERSION = sh ( script: "grep hibernateVersion gradle/version.properties|cut -d'=' -f2", @@ -60,6 +65,7 @@ pipeline { docker volume create tck-vol docker run -v ~/.m2/repository/org/hibernate:/root/.m2/repository/org/hibernate:z -v tck-vol:/tck/persistence-tck/tmp/:z -e NO_SLEEP=${params.NO_SLEEP} -e HIBERNATE_VERSION=$HIBERNATE_VERSION --name tck jakarta-tck-runner docker cp tck:/tck/persistence-tck/tmp/ ./results + rm -Rf ./results/jdk-bundles """ archiveArtifacts artifacts: 'results/**' script { diff --git a/ci/jpa-3.2-tck.Jenkinsfile b/ci/jpa-3.2-tck.Jenkinsfile deleted file mode 100644 index 6f1c8f9b452c..000000000000 --- a/ci/jpa-3.2-tck.Jenkinsfile +++ /dev/null @@ -1,93 +0,0 @@ -@Library('hibernate-jenkins-pipeline-helpers@1.5') _ - -// Avoid running the pipeline on branch indexing -if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { - print "INFO: Build skipped due to trigger being Branch Indexing" - currentBuild.result = 'ABORTED' - return -} - -pipeline { - agent { - label 'LongDuration' - } - tools { - jdk 'OpenJDK 11 Latest' - } - options { - rateLimitBuilds(throttle: [count: 1, durationName: 'day', userBoost: true]) - buildDiscarder(logRotator(numToKeepStr: '3', artifactNumToKeepStr: 
'3')) - disableConcurrentBuilds(abortPrevious: true) - } - parameters { - choice(name: 'IMAGE_JDK', choices: ['jdk11'], description: 'The JDK base image version to use for the TCK image.') - string(name: 'TCK_VERSION', defaultValue: '3.1.0', description: 'The version of the Jakarta JPA TCK i.e. `2.2.0` or `3.0.1`') - string(name: 'TCK_SHA'/* Avoid default since the version is non-final, defaultValue: '33c8a9380fbdf223e84113a4e20866b42ba2b60a46f1d8ac25d240f0bc919294'*/, description: 'The SHA256 of the Jakarta JPA TCK that is distributed under https://download.eclipse.org/jakartaee/persistence/3.0/jakarta-persistence-tck-${TCK_VERSION}.zip.sha256') - booleanParam(name: 'NO_SLEEP', defaultValue: true, description: 'Whether the NO_SLEEP patch should be applied to speed up the TCK execution') - } - stages { - stage('Build') { - steps { - script { - docker.withRegistry('https://index.docker.io/v1/', 'hibernateci.hub.docker.com') { - docker.image('openjdk:11-jdk').pull() - } - } - dir('hibernate') { - checkout scm - sh './gradlew publishToMavenLocal -DjakartaJpaVersion=3.2.0-SNAPSHOT' - script { - env.HIBERNATE_VERSION = sh ( - script: "grep hibernateVersion gradle/version.properties|cut -d'=' -f2", - returnStdout: true - ).trim() - } - } - dir('tck') { - checkout changelog: false, poll: false, scm: [$class: 'GitSCM', branches: [[name: '*/main']], extensions: [], userRemoteConfigs: [[url: 'https://github.com/hibernate/jakarta-tck-runner.git']]] - sh """ \ - cd jpa-3.2; docker build -f Dockerfile.${params.IMAGE_JDK} -t jakarta-tck-runner --build-arg TCK_VERSION=${params.TCK_VERSION} --build-arg TCK_SHA=${params.TCK_SHA} . 
- """ - } - } - } - stage('Run TCK') { - steps { - sh """ \ - rm -Rf ./results - docker rm -f tck || true - docker volume rm -f tck-vol || true - docker volume create tck-vol - docker run -v ~/.m2/repository/org/hibernate:/root/.m2/repository/org/hibernate:z -v tck-vol:/tck/persistence-tck/tmp/:z -e NO_SLEEP=${params.NO_SLEEP} -e HIBERNATE_VERSION=$HIBERNATE_VERSION --name tck jakarta-tck-runner - docker cp tck:/tck/persistence-tck/tmp/ ./results - """ - archiveArtifacts artifacts: 'results/**' - script { - failures = sh ( - script: """ \ - set +x - while read line; do - if [[ "\$line" != *"Passed." ]]; then - echo "\$line" - fi - done . + */ + +/* + * See https://github.com/hibernate/hibernate-jenkins-pipeline-helpers + */ +@Library('hibernate-jenkins-pipeline-helpers@1.17') _ + +import org.hibernate.jenkins.pipeline.helpers.version.Version + +// -------------------------------------------- +// Global build configuration +env.PROJECT = "orm" +env.JIRA_KEY = "HHH" +def RELEASE_ON_SCHEDULE = true // Set to `true` *only* on branches where you want a scheduled release. + +print "INFO: env.PROJECT = ${env.PROJECT}" +print "INFO: env.JIRA_KEY = ${env.JIRA_KEY}" + +// -------------------------------------------- +// Build conditions + +// Avoid running the pipeline on branch indexing +if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { + print "INFO: Build skipped due to trigger being Branch Indexing" + currentBuild.result = 'NOT_BUILT' + return +} + +def manualRelease = currentBuild.getBuildCauses().toString().contains( 'UserIdCause' ) +def cronRelease = currentBuild.getBuildCauses().toString().contains( 'TimerTriggerCause' ) + +// Only do automatic release on branches where we opted in +if ( !manualRelease && !cronRelease ) { + print "INFO: Build skipped because automated releases on push are disabled on this branch." 
+ currentBuild.result = 'NOT_BUILT' + return +} + +if ( !manualRelease && cronRelease && !RELEASE_ON_SCHEDULE ) { + print "INFO: Build skipped because automated releases are disabled on this branch. See constant RELEASE_ON_SCHEDULE in ci/release/Jenkinsfile" + currentBuild.result = 'NOT_BUILT' + return +} + +// -------------------------------------------- +// Reusable methods + +def checkoutReleaseScripts() { + dir('.release/scripts') { + checkout scmGit(branches: [[name: '*/main']], extensions: [], + userRemoteConfigs: [[credentialsId: 'ed25519.Hibernate-CI.github.com', + url: 'https://github.com/hibernate/hibernate-release-scripts.git']]) + } +} + +// -------------------------------------------- +// Pipeline + +pipeline { + agent { + label 'Release' + } + triggers { + // Run every week Sunday midnight + cron('0 0 * * 0') + } + tools { + jdk 'OpenJDK 11 Latest' + } + options { + buildDiscarder logRotator(daysToKeepStr: '30', numToKeepStr: '10') + disableConcurrentBuilds(abortPrevious: false) + preserveStashes() + } + parameters { + string( + name: 'RELEASE_VERSION', + defaultValue: '', + description: 'The version to be released, e.g. 6.2.1.Final. Mandatory for manual releases, to prevent mistakes.', + trim: true + ) + string( + name: 'DEVELOPMENT_VERSION', + defaultValue: '', + description: 'The next version to be used after the release, e.g. 6.2.2-SNAPSHOT. If not set, determined automatically from the release version.', + trim: true + ) + booleanParam( + name: 'RELEASE_DRY_RUN', + defaultValue: false, + description: 'If true, just simulate the release, without pushing any commits or tags, and without uploading any artifacts or documentation.' + ) + } + stages { + stage('Check') { + steps { + script { + print "INFO: params.RELEASE_VERSION = ${params.RELEASE_VERSION}" + print "INFO: params.DEVELOPMENT_VERSION = ${params.DEVELOPMENT_VERSION}" + print "INFO: params.RELEASE_DRY_RUN? 
= ${params.RELEASE_DRY_RUN}" + + checkoutReleaseScripts() + + def currentVersion = Version.parseDevelopmentVersion( sh( + script: ".release/scripts/determine-current-version.sh ${env.PROJECT}", + returnStdout: true + ).trim() ) + echo "Workspace version: ${currentVersion}" + + def releaseVersion + def developmentVersion + + if ( manualRelease ) { + echo "Release was requested manually" + + if ( !params.RELEASE_VERSION ) { + throw new IllegalArgumentException( + 'Missing value for parameter RELEASE_VERSION. This parameter must be set explicitly to prevent mistakes.' + ) + } + releaseVersion = Version.parseReleaseVersion( params.RELEASE_VERSION ) + + if ( !releaseVersion.toString().startsWith( currentVersion.family + '.' ) ) { + throw new IllegalArgumentException( "RELEASE_VERSION = $releaseVersion, which is different from the family of CURRENT_VERSION = $currentVersion. Did you make a mistake?" ) + } + } + else { + echo "Release was triggered automatically" + + // Avoid doing an automatic release if there are no "releasable" commits since the last release (see release scripts for determination) + def releasableCommitCount = sh( + script: ".release/scripts/count-releasable-commits.sh ${env.PROJECT}", + returnStdout: true + ).trim().toInteger() + if ( releasableCommitCount <= 0 ) { + print "INFO: Automatic release skipped because no releasable commits were pushed since the previous release" + currentBuild.getRawBuild().getExecutor().interrupt(Result.NOT_BUILT) + sleep(1) // Interrupt is not blocking and does not take effect immediately. 
+ return + } + + releaseVersion = Version.parseReleaseVersion( sh( + script: ".release/scripts/determine-release-version.sh ${currentVersion}", + returnStdout: true + ).trim() ) + } + echo "Release version: ${releaseVersion}" + + if ( !params.DEVELOPMENT_VERSION ) { + developmentVersion = Version.parseDevelopmentVersion( sh( + script: ".release/scripts/determine-development-version.sh ${releaseVersion}", + returnStdout: true + ).trim() ) + } + else { + developmentVersion = Version.parseDevelopmentVersion( params.DEVELOPMENT_VERSION ) + } + echo "Development version: ${developmentVersion}" + + env.RELEASE_VERSION = releaseVersion.toString() + env.DEVELOPMENT_VERSION = developmentVersion.toString() + env.SCRIPT_OPTIONS = params.RELEASE_DRY_RUN ? "-d" : "" + env.JRELEASER_DRY_RUN = params.RELEASE_DRY_RUN + + // Determine version id to check if Jira version exists + sh ".release/scripts/determine-jira-version-id.sh ${env.JIRA_KEY} ${releaseVersion.withoutFinalQualifier}" + } + } + } + stage('Prepare') { + steps { + script { + checkoutReleaseScripts() + + configFileProvider([ + configFile(fileId: 'release.config.ssh', targetLocation: "${env.HOME}/.ssh/config"), + configFile(fileId: 'release.config.ssh.knownhosts', targetLocation: "${env.HOME}/.ssh/known_hosts") + ]) { + sshagent(['ed25519.Hibernate-CI.github.com', 'hibernate.filemgmt.jboss.org', 'hibernate-ci.frs.sourceforge.net']) { + // set release version + // update changelog from JIRA + // tags the version + // changes the version to the provided development version + withEnv([ + "DISABLE_REMOTE_GRADLE_CACHE=true", + // Increase the amount of memory for this part since asciidoctor doc rendering consumes a lot of metaspace + "GRADLE_OPTS=-Dorg.gradle.jvmargs='-Dlog4j2.disableJmx -Xmx4g -XX:MaxMetaspaceSize=768m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8'" + ]) { + sh ".release/scripts/prepare-release.sh -j -b ${env.GIT_BRANCH} -v 
${env.DEVELOPMENT_VERSION} ${env.PROJECT} ${env.RELEASE_VERSION}" + } + } + } + } + } + } + stage('Publish') { + steps { + script { + checkoutReleaseScripts() + + configFileProvider([ + configFile(fileId: 'release.config.ssh', targetLocation: "${env.HOME}/.ssh/config"), + configFile(fileId: 'release.config.ssh.knownhosts', targetLocation: "${env.HOME}/.ssh/known_hosts") + ]) { + withCredentials([ + usernamePassword(credentialsId: 'central.sonatype.com', passwordVariable: 'JRELEASER_MAVENCENTRAL_TOKEN', usernameVariable: 'JRELEASER_MAVENCENTRAL_USERNAME'), + // https://docs.gradle.org/current/userguide/publishing_gradle_plugins.html#account_setup + usernamePassword(credentialsId: 'gradle-plugin-portal-api-key', passwordVariable: 'GRADLE_PUBLISH_SECRET', usernameVariable: 'GRADLE_PUBLISH_KEY'), + gitUsernamePassword(credentialsId: 'username-and-token.Hibernate-CI.github.com', gitToolName: 'Default'), + file(credentialsId: 'release.gpg.private-key', variable: 'RELEASE_GPG_PRIVATE_KEY_PATH'), + string(credentialsId: 'release.gpg.passphrase', variable: 'JRELEASER_GPG_PASSPHRASE'), + string(credentialsId: 'Hibernate-CI.github.com', variable: 'JRELEASER_GITHUB_TOKEN') + ]) { + sshagent(['ed25519.Hibernate-CI.github.com', 'hibernate.filemgmt.jboss.org', 'hibernate-ci.frs.sourceforge.net']) { + // performs documentation upload and Sonatype release + // push to github + withEnv([ + "DISABLE_REMOTE_GRADLE_CACHE=true" + ]) { + sh ".release/scripts/publish.sh -j ${env.SCRIPT_OPTIONS} ${env.PROJECT} ${env.RELEASE_VERSION} ${env.DEVELOPMENT_VERSION} ${env.GIT_BRANCH}" + } + } + } + } + } + } + } + stage('Release on Jira') { + steps { + script { + checkoutReleaseScripts() + + withCredentials([string(credentialsId: 'release-webhook.hibernate.atlassian.net', variable: 'JIRA_WEBHOOK_SECRET')]) { + sh ".release/scripts/jira-release.sh ${env.SCRIPT_OPTIONS} ${env.PROJECT} ${env.RELEASE_VERSION} ${env.DEVELOPMENT_VERSION}" + } + } + } + } + stage('Update website') { + steps { + script { 
+ checkoutReleaseScripts() + + configFileProvider([ + configFile(fileId: 'release.config.ssh', targetLocation: "${env.HOME}/.ssh/config"), + configFile(fileId: 'release.config.ssh.knownhosts', targetLocation: "${env.HOME}/.ssh/known_hosts") + ]) { + withCredentials([ + gitUsernamePassword(credentialsId: 'username-and-token.Hibernate-CI.github.com', gitToolName: 'Default') + ]) { + sshagent( ['ed25519.Hibernate-CI.github.com', 'hibernate.filemgmt.jboss.org', 'hibernate-ci.frs.sourceforge.net'] ) { + dir( '.release/hibernate.org' ) { + checkout scmGit( + branches: [[name: '*/production']], + extensions: [], + userRemoteConfigs: [[credentialsId: 'ed25519.Hibernate-CI.github.com', url: 'https://github.com/hibernate/hibernate.org.git']] + ) + sh "../scripts/website-release.sh ${env.SCRIPT_OPTIONS} ${env.PROJECT} ${env.RELEASE_VERSION}" + } + } + } + } + } + } + } + stage('Create GitHub release') { + steps { + script { + checkoutReleaseScripts() + withCredentials([string(credentialsId: 'Hibernate-CI.github.com', variable: 'GITHUB_API_TOKEN')]) { + sh ".release/scripts/github-release.sh ${env.SCRIPT_OPTIONS} ${env.PROJECT} ${env.RELEASE_VERSION}" + } + } + } + } + } + post { + always { + configFileProvider([configFile(fileId: 'job-configuration.yaml', variable: 'JOB_CONFIGURATION_FILE')]) { + notifyBuildResult maintainers: (String) readYaml(file: env.JOB_CONFIGURATION_FILE).notification?.email?.recipients + } + } + } +} diff --git a/ci/snapshot-publish.Jenkinsfile b/ci/snapshot-publish.Jenkinsfile deleted file mode 100644 index 18e1119afab0..000000000000 --- a/ci/snapshot-publish.Jenkinsfile +++ /dev/null @@ -1,60 +0,0 @@ -/* - * See https://github.com/hibernate/hibernate-jenkins-pipeline-helpers - */ -@Library('hibernate-jenkins-pipeline-helpers@1.5') _ - -// Avoid running the pipeline on branch indexing -if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { - print "INFO: Build skipped due to trigger being Branch Indexing" - currentBuild.result 
= 'ABORTED' - return -} - -pipeline { - agent { - label 'Fedora' - } - tools { - jdk 'OpenJDK 11 Latest' - } - options { - rateLimitBuilds(throttle: [count: 1, durationName: 'hour', userBoost: true]) - buildDiscarder(logRotator(numToKeepStr: '3', artifactNumToKeepStr: '3')) - disableConcurrentBuilds(abortPrevious: true) - } - stages { - stage('Checkout') { - steps { - checkout scm - } - } - stage('Publish') { - steps { - withCredentials([ - usernamePassword(credentialsId: 'ossrh.sonatype.org', usernameVariable: 'hibernatePublishUsername', passwordVariable: 'hibernatePublishPassword'), - usernamePassword(credentialsId: 'plugins.gradle.org', usernameVariable: 'hibernatePluginPortalUsername', passwordVariable: 'hibernatePluginPortalPassword'), - string(credentialsId: 'ge.hibernate.org-access-key', variable: 'GRADLE_ENTERPRISE_ACCESS_KEY'), - string(credentialsId: 'release.gpg.passphrase', variable: 'SIGNING_PASS'), - file(credentialsId: 'release.gpg.private-key', variable: 'SIGNING_KEYRING') - ]) { - sh '''./gradlew clean publish \ - -PhibernatePublishUsername=$hibernatePublishUsername \ - -PhibernatePublishPassword=$hibernatePublishPassword \ - -Pgradle.publish.key=$hibernatePluginPortalUsername \ - -Pgradle.publish.secret=$hibernatePluginPortalPassword \ - --no-scan \ - -DsigningPassword=$SIGNING_PASS \ - -DsigningKeyFile=$SIGNING_KEYRING \ - ''' - } - } - } - } - post { - always { - configFileProvider([configFile(fileId: 'job-configuration.yaml', variable: 'JOB_CONFIGURATION_FILE')]) { - notifyBuildResult maintainers: (String) readYaml(file: env.JOB_CONFIGURATION_FILE).notification?.email?.recipients - } - } - } -} \ No newline at end of file diff --git a/databases/cockroachdb/matrix.gradle b/databases/cockroachdb/matrix.gradle index c6ed30abc639..d33f379e7faf 100644 --- a/databases/cockroachdb/matrix.gradle +++ b/databases/cockroachdb/matrix.gradle @@ -11,4 +11,4 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. 
* See the lgpl.txt file in the root directory or . */ -jdbcDependency 'org.postgresql:postgresql:42.2.8' \ No newline at end of file +jdbcDependency 'org.postgresql:postgresql:42.6.0' \ No newline at end of file diff --git a/databases/db2/matrix.gradle b/databases/db2/matrix.gradle index dfdbe2ebd0ea..507aca86ebb3 100644 --- a/databases/db2/matrix.gradle +++ b/databases/db2/matrix.gradle @@ -5,4 +5,4 @@ * See the lgpl.txt file in the root directory or . */ -jdbcDependency 'com.ibm.db2:jcc:11.5.7.0' +jdbcDependency 'com.ibm.db2:jcc:11.5.8.0' diff --git a/databases/derby/matrix.gradle b/databases/derby/matrix.gradle index 6339f7a54eec..166aa5780efc 100644 --- a/databases/derby/matrix.gradle +++ b/databases/derby/matrix.gradle @@ -5,7 +5,7 @@ * See the lgpl.txt file in the root directory or . */ //databaseProfile { - jdbcDependency 'org.apache.derby:derby:10.14.2.0' + jdbcDependency 'org.apache.derby:derby:10.15.2.0' // testing { // beforeSuite { diff --git a/databases/hana/matrix.gradle b/databases/hana/matrix.gradle index 59d7bc269ecb..cb148ae76f85 100644 --- a/databases/hana/matrix.gradle +++ b/databases/hana/matrix.gradle @@ -5,4 +5,4 @@ * See the lgpl.txt file in the root directory or . */ -jdbcDependency 'com.sap.cloud.db.jdbc:ngdbc:2.4.59' \ No newline at end of file +jdbcDependency 'com.sap.cloud.db.jdbc:ngdbc:2.16.14' \ No newline at end of file diff --git a/databases/mariadb/matrix.gradle b/databases/mariadb/matrix.gradle index 006c04d49237..267035b75a10 100644 --- a/databases/mariadb/matrix.gradle +++ b/databases/mariadb/matrix.gradle @@ -4,4 +4,4 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -jdbcDependency 'org.mariadb.jdbc:mariadb-java-client:2.7.7' \ No newline at end of file +jdbcDependency 'org.mariadb.jdbc:mariadb-java-client:2.7.9' \ No newline at end of file diff --git a/databases/mssqlserver/matrix.gradle b/databases/mssqlserver/matrix.gradle index d5c0323a88f9..e4980f255942 100644 --- a/databases/mssqlserver/matrix.gradle +++ b/databases/mssqlserver/matrix.gradle @@ -5,4 +5,4 @@ * See the lgpl.txt file in the root directory or . */ -jdbcDependency 'com.microsoft.sqlserver:mssql-jdbc:6.4.0.jre8' \ No newline at end of file +jdbcDependency 'com.microsoft.sqlserver:mssql-jdbc:12.2.0.jre11' \ No newline at end of file diff --git a/databases/mysql/matrix.gradle b/databases/mysql/matrix.gradle index 5fb4ffd93a6a..9f2384b4cacd 100644 --- a/databases/mysql/matrix.gradle +++ b/databases/mysql/matrix.gradle @@ -4,4 +4,4 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -jdbcDependency 'mysql:mysql-connector-java:8.0.27' +jdbcDependency 'com.mysql:mysql-connector-j:8.2.0' diff --git a/databases/oracle/matrix.gradle b/databases/oracle/matrix.gradle index cf9b71080829..8717850f203a 100644 --- a/databases/oracle/matrix.gradle +++ b/databases/oracle/matrix.gradle @@ -4,4 +4,5 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -jdbcDependency 'com.oracle.database.jdbc:ojdbc8:21.8.0.0' \ No newline at end of file +// Do not forget to update settings.gradle as well +jdbcDependency 'com.oracle.database.jdbc:ojdbc11:23.3.0.23.09' \ No newline at end of file diff --git a/databases/pgsql/matrix.gradle b/databases/pgsql/matrix.gradle index b8ac50d60726..4389905177b3 100644 --- a/databases/pgsql/matrix.gradle +++ b/databases/pgsql/matrix.gradle @@ -4,4 +4,4 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -jdbcDependency 'org.postgresql:postgresql:42.2.19' +jdbcDependency 'org.postgresql:postgresql:42.6.0' diff --git a/design/doc-query-expressions.adoc b/design/doc-query-expressions.adoc deleted file mode 100644 index 7e79333c9bcb..000000000000 --- a/design/doc-query-expressions.adoc +++ /dev/null @@ -1,33 +0,0 @@ -= Query - -== Expressions - -=== Paths - -=== Literals - -=== Parameters - -=== Unary expressions - -=== Arithmetic operations - -Numeric v. temporal - -=== Functions - -- Standard functions -- SqmFunctionRegistry -- Role of Dialect - -=== Concatenation operations - -=== Entity-type references - -=== CASE statements - -=== COALESCE statements - -=== NULLIF statements - -=== Sub-queries \ No newline at end of file diff --git a/design/doc-temporal.adoc b/design/doc-temporal.adoc deleted file mode 100644 index ff21ddba9879..000000000000 --- a/design/doc-temporal.adoc +++ /dev/null @@ -1,10 +0,0 @@ -`OffsetDateTime` is not safe to store in database. This form does not understand "zone rules" relating to things -such as DST. An offset of +5, e.g., does not change when DST starts/ends - its just +5. - -A `ZonedDateTime` on the other hand knows the actual timezone as well as the offset for the LocalDateTime portion in -that timezone. It is much more complete picture of the actual Instant. - -The proper solution for storing "with tz" would be to always use a `ZonedDateTime`, converted from `OffsetDateTime` -if needed. In this case, I assume we need to transform a `LocalDateTime` to `ZonedDateTime`? - -^^ what about Dialects that do not support "with tz" datatype variants? Are there any anymore? 
diff --git a/design/working/fk.adoc b/design/fk.adoc similarity index 95% rename from design/working/fk.adoc rename to design/fk.adoc index 826cc7d1a0f3..c160126912f4 100644 --- a/design/working/fk.adoc +++ b/design/fk.adoc @@ -31,7 +31,7 @@ Assuming bi-directionality, we have 2 `Association` refs: -There is a single ForeignKeyDescriptor instance for this FK in our metamodel, with 2 Sides: +There is a single `ForeignKeyDescriptor` instance for this FK in our metamodel, with 2 Sides: ``` ForeignKeyDescriptor ( diff --git a/design/sql-ast.adoc b/design/sql-ast.adoc index 8e4d44fde81f..0d869e604212 100644 --- a/design/sql-ast.adoc +++ b/design/sql-ast.adoc @@ -30,10 +30,11 @@ The actual tree nodes are defined in the `org.hibernate.sql.ast.tree` package. == Building SQL AST -There are 2 main producers of SQL AST atm: +There are 3 main producers of SQL AST: -* SQM translation - see `org.hibernate.query.sqm.sql` -* metamodel-based loading - see `org.hibernate.loader.internal.MetamodelSelectBuilderProcess` +SQM:: Translation of HQL and criteria queries. See `org.hibernate.query.sqm.sql` +Loading:: SQL generated for persistence-context events to load entities and collections. This includes `Session#find`, `Session#get`, `Session#lock`, ... See `org.hibernate.loader.internal.MetamodelSelectBuilderProcess` +Mutations:: SQL generated for persistence-context flush events to write entity and collection state to the database. See `org.hibernate.persister.entity.mutation` and `org.hibernate.persister.collection.mutation` == Translating SQL AST diff --git a/design/working/6.0-posts.adoc b/design/working/6.0-posts.adoc index d1d1ae52d7cd..690f6780b8a7 100644 --- a/design/working/6.0-posts.adoc +++ b/design/working/6.0-posts.adoc @@ -37,7 +37,7 @@ from the removal of deprecated stuff. There are a few one-off changes that brea source compatibility; these are covered in the link:{migration-guide-url}[migration guide]. 
One specific change to note is that many of these contracts have been better defined with type -parameters. Theses were inconsistently and sometimes poorly defined in previous versions. +parameters. Theses were inconsistently (and sometimes poorly) defined in previous versions. Quite a few SPI contracts have changed to support many of the topics discussed here as well as in the link:{migration-guide-url}[migration guide]. Many will also be the subject of the mentioned @@ -267,4 +267,4 @@ For additional details, see: - the link:{migration-guide-url}[Migration Guide] - the https://hibernate.org/orm/releases/6.0/[release page]. -To get in touch, use the usual channels as discussed on the https://hibernate.org/community/[website]. \ No newline at end of file +To get in touch, use the usual channels as discussed on the https://hibernate.org/community/[website]. diff --git a/docker_db.sh b/docker_db.sh index e250e4e1d13a..6992f3f4cf0c 100755 --- a/docker_db.sh +++ b/docker_db.sh @@ -16,12 +16,12 @@ else fi mysql() { - mysql_8_0 + mysql_8_1 } mysql_5_7() { $CONTAINER_CLI rm -f mysql || true - $CONTAINER_CLI run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d docker.io/mysql:5.7.40 --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake --log-bin-trust-function-creators=1 + $CONTAINER_CLI run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d ${DB_IMAGE_MYSQL_5_7:-docker.io/mysql:5.7.43} --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake --log-bin-trust-function-creators=1 # Give the container some time to start OUTPUT= n=0 @@ -45,7 +45,31 @@ mysql_5_7() { mysql_8_0() { $CONTAINER_CLI rm -f mysql || true - $CONTAINER_CLI run --name 
mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d docker.io/mysql:8.0.31 --character-set-server=utf8mb4 --collation-server=utf8mb4_0900_as_cs --skip-character-set-client-handshake --log-bin-trust-function-creators=1 + $CONTAINER_CLI run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d ${DB_IMAGE_MYSQL_8_0:-docker.io/mysql:8.0.31} --character-set-server=utf8mb4 --collation-server=utf8mb4_0900_as_cs --skip-character-set-client-handshake --log-bin-trust-function-creators=1 + # Give the container some time to start + OUTPUT= + n=0 + until [ "$n" -ge 5 ] + do + # Need to access STDERR. Thanks for the snippet https://stackoverflow.com/a/56577569/412446 + { OUTPUT="$( { $CONTAINER_CLI logs mysql; } 2>&1 1>&3 3>&- )"; } 3>&1; + if [[ $OUTPUT == *"ready for connections"* ]]; then + break; + fi + n=$((n+1)) + echo "Waiting for MySQL to start..." 
+ sleep 3 + done + if [ "$n" -ge 5 ]; then + echo "MySQL failed to start and configure after 15 seconds" + else + echo "MySQL successfully started" + fi +} + +mysql_8_1() { + $CONTAINER_CLI rm -f mysql || true + $CONTAINER_CLI run --name mysql -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d ${DB_IMAGE_MYSQL_8_1:-docker.io/mysql:8.1.0} --character-set-server=utf8mb4 --collation-server=utf8mb4_0900_as_cs --skip-character-set-client-handshake --log-bin-trust-function-creators=1 # Give the container some time to start OUTPUT= n=0 @@ -68,12 +92,12 @@ mysql_8_0() { } mariadb() { - mariadb_10_9 + mariadb_11_1 } mariadb_10_3() { $CONTAINER_CLI rm -f mariadb || true - $CONTAINER_CLI run --name mariadb -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d docker.io/mariadb:10.3.36 --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake + $CONTAINER_CLI run --name mariadb -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d ${DB_IMAGE_MARIADB_10_3:-docker.io/mariadb:10.3.39} --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake OUTPUT= n=0 until [ "$n" -ge 5 ] @@ -96,7 +120,30 @@ mariadb_10_3() { mariadb_10_9() { $CONTAINER_CLI rm -f mariadb || true - $CONTAINER_CLI run --name mariadb -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d docker.io/mariadb:10.9.3 --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake + $CONTAINER_CLI run --name mariadb -e 
MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d ${DB_IMAGE_MARIADB_10_9:-docker.io/mariadb:10.9.3} --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake + OUTPUT= + n=0 + until [ "$n" -ge 5 ] + do + # Need to access STDERR. Thanks for the snippet https://stackoverflow.com/a/56577569/412446 + { OUTPUT="$( { $CONTAINER_CLI logs mariadb; } 2>&1 1>&3 3>&- )"; } 3>&1; + if [[ $OUTPUT == *"ready for connections"* ]]; then + break; + fi + n=$((n+1)) + echo "Waiting for MariaDB to start..." + sleep 3 + done + if [ "$n" -ge 5 ]; then + echo "MariaDB failed to start and configure after 15 seconds" + else + echo "MariaDB successfully started" + fi +} + +mariadb_11_1() { + $CONTAINER_CLI rm -f mariadb || true + $CONTAINER_CLI run --name mariadb -e MYSQL_USER=hibernate_orm_test -e MYSQL_PASSWORD=hibernate_orm_test -e MYSQL_DATABASE=hibernate_orm_test -e MYSQL_ROOT_PASSWORD=hibernate_orm_test -p3306:3306 -d ${DB_IMAGE_MARIADB_11_1:-docker.io/mariadb:11.1.2} --character-set-server=utf8mb4 --collation-server=utf8mb4_bin --skip-character-set-client-handshake OUTPUT= n=0 until [ "$n" -ge 5 ] @@ -123,31 +170,32 @@ postgresql() { postgresql_9_5() { $CONTAINER_CLI rm -f postgres || true - $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d docker.io/postgis/postgis:9.5-2.5 + $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d ${DB_IMAGE_POSTGRESQL_9_5:-docker.io/postgis/postgis:9.5-2.5} } postgresql_10() { $CONTAINER_CLI rm -f postgres || true - $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d 
docker.io/postgis/postgis:10-2.5 + $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d ${DB_IMAGE_POSTGRESQL_10:-docker.io/postgis/postgis:10-2.5} } postgresql_13() { $CONTAINER_CLI rm -f postgres || true - $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d docker.io/postgis/postgis:13-3.1 + $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d ${DB_IMAGE_POSTGRESQL_13:-docker.io/postgis/postgis:13-3.1} } postgresql_14() { $CONTAINER_CLI rm -f postgres || true - $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d docker.io/postgis/postgis:14-3.3 + $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d ${DB_IMAGE_POSTGRESQL_14:-docker.io/postgis/postgis:14-3.3} } postgresql_15() { $CONTAINER_CLI rm -f postgres || true - $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 -d docker.io/postgis/postgis:15-3.3 + $CONTAINER_CLI run --name postgres -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p5432:5432 --tmpfs /pgtmpfs:size=131072k -d ${DB_IMAGE_POSTGRESQL_15:-docker.io/postgis/postgis:15-3.3} \ + -c fsync=off -c synchronous_commit=off -c full_page_writes=off -c shared_buffers=256MB -c maintenance_work_mem=256MB -c max_wal_size=1GB -c checkpoint_timeout=1d } edb() { - edb_14 + edb_15 } edb_10() { @@ -164,13 +212,20 @@ edb_14() { $CONTAINER_CLI run --name edb -e 
POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p 5444:5444 -d edb-test:14 } +edb_15() { + $CONTAINER_CLI rm -f edb || true + # We need to build a derived image because the existing image is mainly made for use by a kubernetes operator + (cd edb; $CONTAINER_CLI build -t edb-test:15 -f edb15.Dockerfile .) + $CONTAINER_CLI run --name edb -e POSTGRES_USER=hibernate_orm_test -e POSTGRES_PASSWORD=hibernate_orm_test -e POSTGRES_DB=hibernate_orm_test -p 5444:5444 -d edb-test:15 +} + db2() { db2_11_5 } db2_11_5() { $PRIVILEGED_CLI $CONTAINER_CLI rm -f db2 || true - $PRIVILEGED_CLI $CONTAINER_CLI run --name db2 --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false -p 50000:50000 -d docker.io/ibmcom/db2:11.5.7.0 + $PRIVILEGED_CLI $CONTAINER_CLI run --name db2 --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false -p 50000:50000 -d icr.io/db2_community/db2:11.5.9.0 # Give the container some time to start OUTPUT= while [[ $OUTPUT != *"INSTANCE"* ]]; do @@ -178,13 +233,13 @@ db2_11_5() { sleep 10 OUTPUT=$($PRIVILEGED_CLI $CONTAINER_CLI logs db2 2>&1) done - $PRIVILEGED_CLI $CONTAINER_CLI exec -t db2 su - orm_test bash -c ". /database/config/orm_test/sqllib/db2profile && /database/config/orm_test/sqllib/bin/db2 'connect to orm_test' && /database/config/orm_test/sqllib/bin/db2 'CREATE USER TEMPORARY TABLESPACE usr_tbsp MANAGED BY AUTOMATIC STORAGE'" + $PRIVILEGED_CLI $CONTAINER_CLI exec -t db2 su - orm_test bash -c ". 
/database/config/orm_test/sqllib/db2profile; /database/config/orm_test/sqllib/bin/db2 'connect to orm_test'; /database/config/orm_test/sqllib/bin/db2 'CREATE USER TEMPORARY TABLESPACE usr_tbsp MANAGED BY AUTOMATIC STORAGE'" } db2_10_5() { $PRIVILEGED_CLI $CONTAINER_CLI rm -f db2 || true # The sha represents the tag 10.5.0.5-3.10.0 - $PRIVILEGED_CLI $CONTAINER_CLI run --name db2 --privileged -e DB2INST1_PASSWORD=db2inst1-pwd -e LICENSE=accept -p 50000:50000 -d docker.io/ibmoms/db2express-c@sha256:a499afd9709a1f69fb41703e88def9869955234c3525547e2efc3418d1f4ca2b db2start + $PRIVILEGED_CLI $CONTAINER_CLI run --name db2 --privileged -e DB2INST1_PASSWORD=db2inst1-pwd -e LICENSE=accept -p 50000:50000 -d ${DB_IMAGE_DB2_10_5:-docker.io/ibmoms/db2express-c@sha256:a499afd9709a1f69fb41703e88def9869955234c3525547e2efc3418d1f4ca2b} db2start # Give the container some time to start OUTPUT= while [[ $OUTPUT != *"DB2START"* ]]; do @@ -245,7 +300,7 @@ CREATE TRANSFORM FOR db2gse.ST_Geometry DB2_PROGRAM ( EOF $PRIVILEGED_CLI $CONTAINER_CLI run --name db2spatial --privileged -e DB2INSTANCE=orm_test -e DB2INST1_PASSWORD=orm_test -e DBNAME=orm_test -e LICENSE=accept -e AUTOCONFIG=false -e ARCHIVE_LOGS=false -e TO_CREATE_SAMPLEDB=false -e REPODB=false \ -v ${temp_dir}:/conf \ - -p 50000:50000 -d docker.io/ibmcom/db2:11.5.5.0 + -p 50000:50000 -d ${DB_IMAGE_DB2_SPATIAL:-docker.io/ibmcom/db2:11.5.5.0} # Give the container some time to start OUTPUT= @@ -269,7 +324,7 @@ mssql() { mssql_2017() { $CONTAINER_CLI rm -f mssql || true #This sha256 matches a specific tag of mcr.microsoft.com/mssql/server:2017-latest : - $CONTAINER_CLI run --name mssql -d -p 1433:1433 -e "SA_PASSWORD=Hibernate_orm_test" -e ACCEPT_EULA=Y mcr.microsoft.com/mssql/server@sha256:7d194c54e34cb63bca083542369485c8f4141596805611e84d8c8bab2339eede + $CONTAINER_CLI run --name mssql -d -p 1433:1433 -e "SA_PASSWORD=Hibernate_orm_test" -e ACCEPT_EULA=Y 
${DB_IMAGE_MSSQL_2017:-mcr.microsoft.com/mssql/server@sha256:7d194c54e34cb63bca083542369485c8f4141596805611e84d8c8bab2339eede} sleep 5 n=0 until [ "$n" -ge 5 ] @@ -291,7 +346,7 @@ mssql_2017() { mssql_2022() { $CONTAINER_CLI rm -f mssql || true #This sha256 matches a specific tag of mcr.microsoft.com/mssql/server:2022-latest : - $CONTAINER_CLI run --name mssql -d -p 1433:1433 -e "SA_PASSWORD=Hibernate_orm_test" -e ACCEPT_EULA=Y mcr.microsoft.com/mssql/server@sha256:5439be9edc3b514cf647bcd3651779fa13f487735a985f40cbdcfecc60fea273 + $CONTAINER_CLI run --name mssql -d -p 1433:1433 -e "SA_PASSWORD=Hibernate_orm_test" -e ACCEPT_EULA=Y ${DB_IMAGE_MSSQL_2022:-mcr.microsoft.com/mssql/server@sha256:b94071acd4612bfe60a73e265097c2b6388d14d9d493db8f37cf4479a4337480} sleep 5 n=0 until [ "$n" -ge 5 ] @@ -313,7 +368,7 @@ mssql_2022() { sybase() { $CONTAINER_CLI rm -f sybase || true # Yup, that sucks, but on ubuntu we need to use -T11889 as per: https://github.com/DataGrip/docker-env/issues/12 - $CONTAINER_CLI run -d -p 5000:5000 -p 5001:5001 --name sybase --entrypoint /bin/bash docker.io/nguoianphu/docker-sybase -c "source /opt/sybase/SYBASE.sh + $CONTAINER_CLI run -d -p 9000:5000 -p 9001:5001 --name sybase --entrypoint /bin/bash ${DB_IMAGE_SYBASE:-docker.io/nguoianphu/docker-sybase} -c "source /opt/sybase/SYBASE.sh /opt/sybase/ASE-16_0/bin/dataserver \ -d/opt/sybase/data/master.dat \ -e/opt/sybase/ASE-16_0/install/MYSYBASE.log \ @@ -440,18 +495,23 @@ oracle_setup() { sleep 5; # On WSL, health-checks intervals don't work for Podman, so run them manually if command -v podman > /dev/null; then - $CONTAINER_CLI healthcheck run oracle > /dev/null + $PRIVILEGED_CLI $CONTAINER_CLI healthcheck run oracle > /dev/null fi - HEALTHSTATUS="`$CONTAINER_CLI inspect -f $HEALTCHECK_PATH oracle`" + HEALTHSTATUS="`$PRIVILEGED_CLI $CONTAINER_CLI inspect -f $HEALTCHECK_PATH oracle`" HEALTHSTATUS=${HEALTHSTATUS##+( )} #Remove longest matching series of spaces from the front 
HEALTHSTATUS=${HEALTHSTATUS%%+( )} #Remove longest matching series of spaces from the back done sleep 2; echo "Oracle successfully started" # We increase file sizes to avoid online resizes as that requires lots of CPU which is restricted in XE - $CONTAINER_CLI exec oracle bash -c "source /home/oracle/.bashrc; bash -c \" + $PRIVILEGED_CLI $CONTAINER_CLI exec oracle bash -c "source /home/oracle/.bashrc; bash -c \" cat < /opt/oracle/oradata/dbconfig/XE/sqlnet.ora +!lsnrctl reload + -- Increasing redo logs alter database add logfile group 4 '\$ORACLE_BASE/oradata/XE/redo04.log' size 500M reuse; alter database add logfile group 5 '\$ORACLE_BASE/oradata/XE/redo05.log' size 500M reuse; @@ -507,6 +567,85 @@ grant all privileges to hibernate_orm_test; EOF\"" } +oracle_free_setup() { + HEALTHSTATUS= + until [ "$HEALTHSTATUS" == "healthy" ]; + do + echo "Waiting for Oracle Free to start..." + sleep 5; + # On WSL, health-checks intervals don't work for Podman, so run them manually + if command -v podman > /dev/null; then + $PRIVILEGED_CLI $CONTAINER_CLI healthcheck run oracle > /dev/null + fi + HEALTHSTATUS="`$PRIVILEGED_CLI $CONTAINER_CLI inspect -f $HEALTCHECK_PATH oracle`" + HEALTHSTATUS=${HEALTHSTATUS##+( )} #Remove longest matching series of spaces from the front + HEALTHSTATUS=${HEALTHSTATUS%%+( )} #Remove longest matching series of spaces from the back + done + sleep 2; + echo "Oracle successfully started" + # We increase file sizes to avoid online resizes as that requires lots of CPU which is restricted in XE + $PRIVILEGED_CLI $CONTAINER_CLI exec oracle bash -c "source /home/oracle/.bashrc; bash -c \" +cat < /opt/oracle/oradata/dbconfig/FREE/sqlnet.ora +!lsnrctl reload +-- Increasing redo logs +alter database add logfile group 4 '\$ORACLE_BASE/oradata/FREE/redo04.log' size 500M reuse; +alter database add logfile group 5 '\$ORACLE_BASE/oradata/FREE/redo05.log' size 500M reuse; +alter database add logfile group 6 '\$ORACLE_BASE/oradata/FREE/redo06.log' size 500M reuse; 
+alter system switch logfile; +alter system switch logfile; +alter system switch logfile; +alter system checkpoint; +alter database drop logfile group 1; +alter database drop logfile group 2; +alter database drop logfile group 3; +!rm \$ORACLE_BASE/oradata/FREE/redo01.log +!rm \$ORACLE_BASE/oradata/FREE/redo02.log +!rm \$ORACLE_BASE/oradata/FREE/redo03.log + +-- Increasing SYSAUX data file +alter database datafile '\$ORACLE_BASE/oradata/FREE/sysaux01.dbf' resize 600M; + +-- Modifying database init parameters +alter system set open_cursors=1000 sid='*' scope=both; +alter system set session_cached_cursors=500 sid='*' scope=spfile; +alter system set db_securefile=ALWAYS sid='*' scope=spfile; +alter system set dispatchers='(PROTOCOL=TCP)(SERVICE=FREEXDB)(DISPATCHERS=0)' sid='*' scope=spfile; +alter system set recyclebin=OFF sid='*' SCOPE=SPFILE; + +-- Comment the 2 next lines to be able to use Diagnostics Pack features +alter system set sga_target=0m sid='*' scope=both; +-- alter system set statistics_level=BASIC sid='*' scope=spfile; + +-- Restart the database +SHUTDOWN IMMEDIATE; +STARTUP MOUNT; +ALTER DATABASE OPEN; + +-- Switch to the FREEPDB1 pluggable database +alter session set container=freepdb1; + +-- Modify FREEPDB1 datafiles and tablespaces +alter database datafile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/system01.dbf' resize 320M; +alter database datafile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/sysaux01.dbf' resize 360M; +alter database datafile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/undotbs01.dbf' resize 400M; +alter database datafile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/undotbs01.dbf' autoextend on next 16M; +alter database tempfile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/temp01.dbf' resize 400M; +alter database tempfile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/temp01.dbf' autoextend on next 16M; +alter database datafile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/users01.dbf' resize 100M; +alter database datafile '\$ORACLE_BASE/oradata/FREE/FREEPDB1/users01.dbf' autoextend on 
next 16M; +alter tablespace USERS nologging; +alter tablespace SYSTEM nologging; +alter tablespace SYSAUX nologging; + +create user hibernate_orm_test identified by hibernate_orm_test quota unlimited on users; +grant all privileges to hibernate_orm_test; +EOF\"" +} + oracle_setup_old() { HEALTHSTATUS= until [ "$HEALTHSTATUS" == "healthy" ]; @@ -515,14 +654,14 @@ oracle_setup_old() { sleep 5; # On WSL, health-checks intervals don't work for Podman, so run them manually if command -v podman > /dev/null; then - $CONTAINER_CLI healthcheck run oracle > /dev/null + $PRIVILEGED_CLI $CONTAINER_CLI healthcheck run oracle > /dev/null fi - HEALTHSTATUS="`$CONTAINER_CLI inspect -f $HEALTCHECK_PATH oracle`" + HEALTHSTATUS="`$PRIVILEGED_CLI $CONTAINER_CLI inspect -f $HEALTCHECK_PATH oracle`" HEALTHSTATUS=${HEALTHSTATUS##+( )} #Remove longest matching series of spaces from the front HEALTHSTATUS=${HEALTHSTATUS%%+( )} #Remove longest matching series of spaces from the back done # We increase file sizes to avoid online resizes as that requires lots of CPU which is restricted in XE - $CONTAINER_CLI exec oracle bash -c "source /home/oracle/.bashrc; bash -c \" + $PRIVILEGED_CLI $CONTAINER_CLI exec oracle bash -c "source /home/oracle/.bashrc; bash -c \" cat < /etc/docker/daemon.json" + sudo service docker start + elif ! grep -q userland-proxy /etc/docker/daemon.json; then + export docker_daemon_json=$( /etc/docker/daemon.json' + sudo service docker start + fi + fi +} + +oracle_atps() { + echo "Managing Oracle Autonomous Database..." 
+ export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=autonomous2&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') + export PASSWORD=$(echo $INFO | jq -r '.database' | jq -r '.password') + + curl -k -s -X POST "https://${HOST}.oraclevcn.com:8443/ords/admin/_/sql" -H 'content-type: application/sql' -H 'accept: application/json' -basic -u admin:${PASSWORD} --data-ascii "create user hibernate_orm_test_$RUNID identified by \"Oracle_19_Password\" DEFAULT TABLESPACE DATA TEMPORARY TABLESPACE TEMP;alter user hibernate_orm_test_$RUNID quota unlimited on data;grant CREATE SESSION, RESOURCE, CREATE VIEW, CREATE SYNONYM to hibernate_orm_test_$RUNID;" +} + +oracle_atps_tls() { + echo "Managing Oracle Autonomous Database..." + export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=autonomous&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') + export PASSWORD=$(echo $INFO | jq -r '.database' | jq -r '.password') + + curl -s -X POST "https://${HOST}.oraclecloudapps.com/ords/admin/_/sql" -H 'content-type: application/sql' -H 'accept: application/json' -basic -u admin:${PASSWORD} --data-ascii "create user hibernate_orm_test_$RUNID identified by \"Oracle_19_Password\" DEFAULT TABLESPACE DATA TEMPORARY TABLESPACE TEMP;alter user hibernate_orm_test_$RUNID quota unlimited on data;grant CREATE SESSION, RESOURCE, CREATE VIEW, CREATE SYNONYM, CREATE DOMAIN to hibernate_orm_test_$RUNID;" +} + +oracle_db19c() { + echo "Managing Oracle Database 19c..." 
+ export INFO=$(curl -s -k -L -X GET "https://api.atlas-controller.oraclecloud.com/ords/atlas/admin/database?type=db19c&hostname=`hostname`" -H 'accept: application/json') + export HOST=$(echo $INFO | jq -r '.database' | jq -r '.host') + export SERVICE=$(echo $INFO | jq -r '.database' | jq -r '.service') + export PASSWORD=$(echo $INFO | jq -r '.database' | jq -r '.password') + +/home/opc/sqlcl/bin/sql -s system/$PASSWORD@$HOST:1521/$SERVICE <&1) + done + echo "Enabling experimental box2d operators and some optimized settings for running the tests" + #settings documented in https://www.cockroachlabs.com/docs/v22.1/local-testing.html#use-a-local-single-node-cluster-with-in-memory-storage + $CONTAINER_CLI exec cockroach bash -c "cat <&1) + if [[ $OUTPUT == *"server is running"* ]]; then + break; + fi + n=$((n+1)) + echo "Waiting for TiDB to start..." + sleep 3 + done + $CONTAINER_CLI run --link tidb:tidb -it --rm docker.io/mysql:8.0.31 mysql -htidb -P4000 -uroot -e "create database hibernate_orm_test; create user 'hibernate_orm_test' identified by 'hibernate_orm_test'; grant all on hibernate_orm_test.* to 'hibernate_orm_test';" + if [ "$n" -ge 5 ]; then + echo "TiDB failed to start and configure after 15 seconds" + else + echo "TiDB successfully started" + fi +} + if [ -z ${1} ]; then echo "No db name provided" echo "Provide one of:" echo -e "\tcockroachdb" + echo -e "\tcockroachdb_23_1" echo -e "\tcockroachdb_22_2" echo -e "\tcockroachdb_22_1" echo -e "\tcockroachdb_21_1" @@ -781,21 +1073,24 @@ if [ -z ${1} ]; then echo -e "\tdb2_10_5" echo -e "\tdb2_spatial" echo -e "\tedb" + echo -e "\tedb_15" echo -e "\tedb_14" echo -e "\tedb_10" echo -e "\thana" echo -e "\tmariadb" + echo -e "\tmariadb_11_1" echo -e "\tmariadb_10_9" echo -e "\tmariadb_10_3" echo -e "\tmssql" echo -e "\tmssql_2022" echo -e "\tmssql_2017" echo -e "\tmysql" + echo -e "\tmysql_8_1" echo -e "\tmysql_8_0" echo -e "\tmysql_5_7" echo -e "\toracle" + echo -e "\toracle_23" echo -e "\toracle_21" - echo -e 
"\toracle_18" echo -e "\toracle_11" echo -e "\tpostgresql" echo -e "\tpostgresql_15" @@ -804,6 +1099,8 @@ if [ -z ${1} ]; then echo -e "\tpostgresql_10" echo -e "\tpostgresql_9_5" echo -e "\tsybase" + echo -e "\ttidb" + echo -e "\ttidb_5_1" else ${1} fi diff --git a/documentation/documentation.gradle b/documentation/documentation.gradle index 15ee582734fb..d533dda3ab9f 100644 --- a/documentation/documentation.gradle +++ b/documentation/documentation.gradle @@ -1,7 +1,28 @@ +import java.util.function.Function + import org.asciidoctor.gradle.jvm.AsciidoctorTask +import org.asciidoctor.gradle.jvm.pdf.AsciidoctorPdfTask + +buildscript { + configurations.all { + resolutionStrategy.dependencySubstitution { + substitute module( 'com.burgstaller:okhttp-digest:1.10' ) using module( + 'io.github.rburgst:okhttp-digest:1.21' + ) because 'okhttp-digest only version 1.21 is available on Maven Central. Old version was on jcenter, which asciidoctor-gradle-plugin depends on transitively through simplified-jruby-gradle-plugin via http-builder-ng-okhttp ' + } + } +} plugins { - id 'org.asciidoctor.jvm.convert' version '3.3.2' + id 'org.asciidoctor.jvm.convert' version '4.0.2' + id 'org.asciidoctor.jvm.pdf' version '4.0.2' + id "org.asciidoctor.jvm.gems" version "4.0.2" + id "org.hibernate.orm.build.settings-doc" +} + +repositories { + mavenCentral() + ruby.gems() } /* @@ -12,17 +33,96 @@ plugins { */ -apply from: rootProject.file( 'gradle/java-module.gradle' ) +apply from: rootProject.file( 'gradle/module.gradle' ) apply from: rootProject.file( 'gradle/releasable.gradle' ) -apply plugin: 'org.hibernate.matrix-test' apply plugin: 'org.hibernate.orm.build.reports' -apply plugin: 'org.hibernate.orm.build.properties' -tasks.build.dependsOn 'buildDocs' + defaultTasks 'buildDocs' +configurations { + core + + testing + + envers + spatial + + agroal + c3p0 + hikaricp + proxool + vibur + + jcache + + jpamodelgen + + javadocClasspath { + description = 'Class files for the javadoc to be built' 
+ resolutionStrategy.capabilitiesResolution.withCapability('org.junit.jupiter:junit-jupiter-params:5.7.1') { details -> + details.select( details.candidates.first() ).because( 'first' ) + } + + extendsFrom core + extendsFrom testing + extendsFrom envers + extendsFrom spatial + extendsFrom agroal + extendsFrom c3p0 + extendsFrom hikaricp + extendsFrom proxool + extendsFrom vibur + extendsFrom jcache + extendsFrom jpamodelgen + } + + javadocSources { + description = 'Source files to be built by the javadoc tool' + } +} + dependencies { + attributesSchema { schema -> + schema.attribute(Bundling.BUNDLING_ATTRIBUTE) { matchStrategy -> + final def nameComparator = Comparator.comparing( + new Function() { + @Override + String apply(Bundling o) { + return o.name + } + } + ) + matchStrategy.ordered(new Comparator() { + @Override + int compare(Bundling o1, Bundling o2) { + if ( Objects.equals( o1, o2 ) ) { + return 0; + } + + if ( o1 == null ) { + return 1; + } + + if ( o2 == null ) { + return -1; + } + + if ( o1.name == Bundling.EMBEDDED ) { + return -1; + } + + if ( o2.name == Bundling.EMBEDDED ) { + return 1; + } + + return nameComparator.compare(o1,o2) + } + } ) + } + } + ext.pressgangVersion = '3.0.0' reportAggregation project( ':hibernate-agroal' ) @@ -40,151 +140,464 @@ dependencies { reportAggregation project(':hibernate-enhance-maven-plugin') reportAggregation project(':hibernate-jpamodelgen') - implementation project( ':hibernate-core' ) + asciidoctorGems 'rubygems:rouge:4.1.1' - annotationProcessor project( ':hibernate-jpamodelgen' ) + core project( ':hibernate-core' ) + javadocSources project( path: ':hibernate-core', configuration: 'javadocSources' ) - testImplementation project(':hibernate-testing') - testImplementation project(':hibernate-envers') - testImplementation project(':hibernate-spatial') - testImplementation project(':hibernate-jcache') - testImplementation project( path: ':hibernate-core', configuration: 'tests' ) + testing project( 
':hibernate-testing' ) + javadocSources project( path: ':hibernate-testing', configuration: 'javadocSources' ) - testImplementation 'org.apache.commons:commons-lang3:3.4' - testImplementation 'org.osgi:org.osgi.core:4.3.1' + envers project( ':hibernate-envers' ) + javadocSources project( path: ':hibernate-envers', configuration: 'javadocSources' ) - testImplementation testLibs.mockito - testImplementation testLibs.mockitoInline + spatial project( ':hibernate-spatial' ) + javadocSources project( path: ':hibernate-spatial', configuration: 'javadocSources' ) - testImplementation jakartaLibs.jaxbApi - testImplementation jakartaLibs.jaxb - testImplementation jakartaLibs.jsonb - testImplementation libs.jacksonXml + agroal project( ':hibernate-agroal' ) + javadocSources project( path: ':hibernate-agroal', configuration: 'javadocSources' ) - testRuntimeOnly testLibs.wildFlyTxnClient - testRuntimeOnly(libs.ehcache3) { - capabilities { - requireCapability 'org.ehcache.modules:ehcache-xml-jakarta' - } - } - // Needed for JSON tests - testRuntimeOnly libs.jackson + c3p0 project( ':hibernate-c3p0' ) + javadocSources project( path: ':hibernate-c3p0', configuration: 'javadocSources' ) + + hikaricp project( ':hibernate-hikaricp' ) + javadocSources project( path: ':hibernate-hikaricp', configuration: 'javadocSources' ) + + proxool project( ':hibernate-proxool' ) + javadocSources project( path: ':hibernate-proxool', configuration: 'javadocSources' ) + + vibur project( ':hibernate-vibur' ) + javadocSources project( path: ':hibernate-vibur', configuration: 'javadocSources' ) + + jcache project( ':hibernate-jcache' ) + javadocSources project( path: ':hibernate-jcache', configuration: 'javadocSources' ) + + jpamodelgen project( ':hibernate-jpamodelgen' ) + javadocSources project( path: ':hibernate-jpamodelgen', configuration: 'javadocSources' ) + + javadocClasspath libs.loggingAnnotations + javadocClasspath jakartaLibs.validation + javadocClasspath jakartaLibs.cdi + javadocClasspath 
jakartaLibs.jacc + javadocClasspath jakartaLibs.jsonbApi + javadocClasspath libs.ant + javadocClasspath dbLibs.postgresql + javadocClasspath libs.jackson + javadocClasspath gradleApi() + javadocClasspath libs.jacksonXml + javadocClasspath dbLibs.oracle } if ( project.ormVersion.isSnapshot ) { // only run the ci build tasks for SNAPSHOT versions - task ciBuild( dependsOn: [clean, test] ) + tasks.register('ciBuild') { dependsOn clean } tasks.release.enabled false } else { - tasks.release.dependsOn clean, test + tasks.release.dependsOn clean } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// grouping tasks - declaration, see below for task dependency definitions +// aggregated Javadoc // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -task buildDocs { - group 'Documentation' - description 'Grouping task for performing all documentation building tasks' -} +def aggregateJavadocsTask = tasks.register( "javadoc", Javadoc ) { + group = "documentation" + description = 'Builds JavaDoc aggregated across all ORM sub-projects' + + destinationDir = mkdir( layout.buildDirectory.file( 'javadocs' ) ) + source = configurations.javadocSources + classpath = configurations.javadocClasspath + + configure( options ) { + overview = rootProject.file( "shared/javadoc/overview.html" ) + windowTitle = 'Hibernate Javadocs' + docTitle = "Hibernate Javadoc ($project.version)" + } -task buildDocsForPublishing { - group 'Documentation' - description 'Grouping task for building all documentation for publishing (release)' + if ( jdkVersions.explicit ) { +// setJFlags( +// getProperty( 'toolchain.javadoc.jvmargs' ).toString(). 
+// split( ' ' ).toList().findAll( { !it.isEmpty() } ) +// ) + + // Display version of Java tools + doFirst { + if ( javadocTool.present ) { + logger.lifecycle "Building aggregated javadoc with '${javadocTool.get().metadata.installationPath}'" + } + } + } + + doFirst { + def javaLanguageVersion = javadocTool.present + ? javadocTool.get().metadata.languageVersion + : JavaLanguageVersion.of( JavaVersion.current().name ) + + if ( javaLanguageVersion.asInt() > 11 ) { + println "Aggregated Javadocs are being built using a JDK newer than version 11: \n" + + "\t* `stylesheet.css` will not be compatible\n" + + "\t* generating the User Guide settings fragment will not succeed" + } + } } +apply from: rootProject.file( 'gradle/javadoc.gradle' ) + asciidoctorj { + requires 'rouge' + modules { + pdf { + version '2.3.7' + } + } attributes icons: 'font', experimental: true, - 'source-highlighter': 'prettify', + 'source-highlighter': 'rouge', majorMinorVersion: rootProject.ormVersion.family, fullVersion: rootProject.ormVersion.fullName options logDocuments: true } -// Collect config properties ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -task collectConfigProperties { task -> - group 'Documentation' - description 'Collect config properties' - - // make sure that the javadocs are generated prior to collecting properties. - dependsOn ':hibernate-core:javadoc' - dependsOn ':hibernate-envers:javadoc' - dependsOn ':hibernate-jcache:javadoc' - - dependsOn tasks.generateConfigPropertiesMap - dependsOn tasks.writeConfigPropertiesMap - - tasks.buildDocs.dependsOn task - tasks.buildDocsForPublishing.dependsOn task - -} // Topical Guides ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -task renderTopicalGuides(type: AsciidoctorTask, group: 'Documentation') {task-> - description = 'Renders the Topical Guides in HTML format using Asciidoctor.' 
- tasks.buildDocs.dependsOn task - tasks.buildDocsForPublishing.dependsOn task +def renderTopicalGuideHtmlTask = tasks.register( 'renderTopicalGuideHtml', AsciidoctorTask ) { task -> + group = "Documentation" + description = 'Renders the Topical Guides in HTML format using Asciidoctor.' + inputs.property "hibernate-version", project.ormVersion - sourceDir = file( 'src/main/asciidoc/topical' ) - outputDir = new File("$buildDir/asciidoc/topical/html_single") + sourceDir = file( 'src/main/asciidoc/topical' ) + outputDir = new File( "$buildDir/asciidoc/topical/html_single" ) resources { - from('src/main/asciidoc/topical/') { + from( 'src/main/asciidoc/topical/' ) { include '**/images/**' } } } +def renderTopicalGuidesTask = tasks.register( 'renderTopicalGuides', AsciidoctorTask ) { task -> + task.group = "Documentation" + task.description = 'Renders the Topical Guides in all formats.' + task.dependsOn renderTopicalGuideHtmlTask +} -// Getting Started Guides (quick starts) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -task renderGettingStartedGuides(type: AsciidoctorTask, group: 'Documentation') {task-> - description = 'Renders the Getting Started Guides (quick starts) in HTML format using Asciidoctor.' 
- tasks.buildDocs.dependsOn task - tasks.buildDocsForPublishing.dependsOn task - sourceDir = file( 'src/main/asciidoc/quickstart/guides' ) - sources { - include 'index.adoc' - } - outputDir = new File("$buildDir/asciidoc/quickstart/html_single") -} +// Getting Started Guides (quick starts) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +def gettingStartedGuideHtmlDir = layout.buildDirectory.dir( "asciidoc/quickstart/html_single" ) -task buildTutorialZip(type: Zip) {task-> +def buildTutorialZipTask = tasks.register( 'buildTutorialZip', Zip ) { task -> from 'src/main/asciidoc/quickstart/tutorials' - destinationDirectory = tasks.renderGettingStartedGuides.outputDir + destinationDirectory.set( gettingStartedGuideHtmlDir ) archiveFileName = 'hibernate-tutorials.zip' expand( version: project.version, - slf4j: "1.7.5", + slf4j: "2.0.7", junit: testLibs.versions.junit4.get(), h2: dbLibs.versions.h2.get() ) - tasks.renderGettingStartedGuides.dependsOn task } +def renderGettingStartedGuideHtmlTask = tasks.register( 'renderGettingStartedGuideHtmlTask', AsciidoctorTask ) { task -> + group = "Documentation" + description = 'Renders the Getting Started Guides (quickstarts) in HTML format using Asciidoctor.' 
+ inputs.property "hibernate-version", project.ormVersion + finalizedBy buildTutorialZipTask -// User Guide ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + sourceDir = file( 'src/main/asciidoc/quickstart/guides' ) + sources 'index.adoc' + + outputDir = gettingStartedGuideHtmlDir + + attributes linkcss: true, + stylesheet: "css/hibernate.css", + docinfo: 'private', + jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/" + + resources { + from( 'src/main/style/asciidoctor' ) { + include 'images/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'css/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'js/**' + } + } +} + +def renderGettingStartedGuidePdfTask = tasks.register( 'renderGettingStartedGuidePdf', AsciidoctorPdfTask ) { task -> + group = "Documentation" + description = 'Renders the Getting Started Guides in PDF format using Asciidoctor.' + inputs.property "hibernate-version", project.ormVersion + + baseDir = file( 'src/main/asciidoc/quickstart/guides' ) + + sourceDir = file( 'src/main/asciidoc/quickstart/guides' ) + sources 'index.adoc' + + outputDir = layout.buildDirectory.dir( "asciidoc/quickstart/pdf" ) + + attributes jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/" +} + +def renderGettingStartedGuidesTask = tasks.register( 'renderGettingStartedGuides' ) { task -> + task.group = "Documentation" + task.description = 'Renders the Getting Started Guide in all formats, as well as the acccompanying tutorial zip.' 
+ task.dependsOn renderGettingStartedGuideHtmlTask, renderGettingStartedGuidePdfTask, buildTutorialZipTask +} + + +// Introduction ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +def renderIntroductionHtmlTask = tasks.register( 'renderIntroductionHtml', AsciidoctorTask ) { task -> + group = "Documentation" + description = 'Renders the Introduction in HTML format using Asciidoctor.' + inputs.property "hibernate-version", project.ormVersion + + sourceDir = file( 'src/main/asciidoc/introduction' ) + sources 'Hibernate_Introduction.adoc' + + outputDir = "$buildDir/asciidoc/introduction/html_single" + + attributes linkcss: true, + stylesheet: "css/hibernate.css", + docinfo: 'private', + jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/" + + resources { + from( 'src/main/asciidoc/introduction/' ) { + include 'images/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'images/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'css/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'js/**' + } + } +} + +def renderIntroductionPdfTask = tasks.register( 'renderIntroductionPdf', AsciidoctorPdfTask ) { task -> + group = "Documentation" + description = 'Renders the Introduction in PDF format using Asciidoctor.' + inputs.property "hibernate-version", project.ormVersion + + baseDir = file( 'src/main/asciidoc/introduction' ) + + sourceDir = file( 'src/main/asciidoc/introduction' ) + sources 'Hibernate_Introduction.adoc' + + outputDir = "$buildDir/asciidoc/introduction/pdf" + + attributes jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/" +} + +//noinspection GroovyUnusedAssignment +def renderIntroductionGuidesTask = tasks.register( "renderIntroductionGuides" ) { task -> + group = "Documentation" + description = 'Renders the Introduction Guide in all formats.' 
+ task.dependsOn renderIntroductionHtmlTask, renderIntroductionPdfTask -task renderUserGuide(type: AsciidoctorTask, group: 'Documentation') {task-> - description = 'Renders the User Guides in HTML format using Asciidoctor.' tasks.buildDocs.dependsOn task - tasks.buildDocsForPublishing.dependsOn task +} - dependsOn tasks.collectConfigProperties - sourceDir = file( 'src/main/asciidoc/userguide' ) - sources { - include 'Hibernate_User_Guide.adoc' - } - outputDir = "$buildDir/asciidoc/userguide/html_single" +// HQL Guide ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +def renderQueryLanguageGuideHtmlTask = tasks.register( 'renderQueryLanguageGuideHtml', AsciidoctorTask ) { task -> + task.group = "Documentation" + task.description = 'Renders the Query Language document in HTML format using Asciidoctor.' + + task.inputs.property "hibernate-version", project.ormVersion + + task.sourceDir = file( 'src/main/asciidoc/querylanguage' ) + task.sources 'Hibernate_Query_Language.adoc' + + task.outputDir = layout.buildDirectory.dir( "asciidoc/querylanguage/html_single" ) + + task.attributes linkcss: true, + stylesheet: "css/hibernate.css", + docinfo: 'private', + jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/" + + task.resources { + from( 'src/main/asciidoc/querylanguage/' ) { + include 'images/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'images/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'css/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'js/**' + } + } +} + +def renderQueryLanguageGuidePdfTask = tasks.register( 'renderQueryLanguageGuidePdf', AsciidoctorPdfTask ) { task -> + group = "Documentation" + description = 'Renders the Query Language document in PDF format using Asciidoctor.' 
+ + inputs.property "hibernate-version", project.ormVersion + + sourceDir = file( 'src/main/asciidoc/querylanguage' ) + baseDir = file( 'src/main/asciidoc/querylanguage' ) + sources { + include 'Hibernate_Query_Language.adoc' + } + outputDir = layout.buildDirectory.dir( "asciidoc/querylanguage/pdf" ) + + attributes jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/" +} + +//noinspection GroovyUnusedAssignment +def renderQueryLanguageGuidesTask = tasks.register( 'renderQueryLanguageGuides' ) { task -> + group = "Documentation" + description = 'Renders Query Language Guide in all formats.' + task.dependsOn renderQueryLanguageGuideHtmlTask + task.dependsOn renderQueryLanguageGuidePdfTask + + tasks.buildDocs.dependsOn task +} + + +// User Guide ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +settingsDocumentation { + anchorNameBase = "settings" + sections { + compliance { + explicitPosition = 1 + summary = "Jakarta Persistence Compliance Settings" + description = "Settings which control various aspects of compliance with the Jakarta Persistence specification " + settingsClassName "org.hibernate.cfg.JpaComplianceSettings" + } + persistence { + explicitPosition = 2 + summary = "Persistence Unit Settings" + description = "Settings related to the creation and configuration of a persistence-unit - SessionFactory and EntityManagerFactory" + settingsClassName "org.hibernate.cfg.PersistenceSettings" + } + jdbc { + explicitPosition = 3 + summary = "JDBC Settings" + description = "Settings which control various aspects of how Hibernate interacts with JDBC" + settingsClassName "org.hibernate.cfg.JdbcSettings" + } + c3p0 { + explicitPosition = 4 + summary = "C3P0 Connection Pool Settings" + description = "Settings related to integrating with the C3P0 Connection pool" + settingsClassName "org.hibernate.cfg.C3p0Settings" + } + proxool { + explicitPosition = 5 + summary = "Proxool Connection 
Pool Settings" + description = "Settings related to integrating with the Proxool Connection pool" + settingsClassName "org.hibernate.cfg.ProxoolSettings" + } + mapping { + explicitPosition = 6 + summary = "Domain Mapping Settings" + description = "Settings which control how domain mappings are handled" + settingsClassName "org.hibernate.cfg.MappingSettings" + } + environment { + summary = "Runtime Environment Settings" + description = "Settings related to JNDI and ClassLoaders" + settingsClassName "org.hibernate.cfg.EnvironmentSettings" + } + schema { + summary = "Schema Tooling Settings" + description = "Settings which control the creation, dropping, update and validation of database schemas" + settingsClassName "org.hibernate.cfg.SchemaToolingSettings" + } + bytecode { + summary = "Bytecode Manipulation Settings" + description = "Settings which control Hibernate's BytecodeProvider used for bytecode manipulation" + settingsClassName "org.hibernate.cfg.BytecodeSettings" + } + cache { + summary = "Second-level Cache Settings" + description = "Settings which control Hibernate's second-level caching" + settingsClassName "org.hibernate.cfg.CacheSettings" + settingsClassName "org.hibernate.cache.jcache.ConfigSettings" + } + query { + summary = "Query Settings" + description = "Settings which control various parts of Hibernate's Query handling" + settingsClassName "org.hibernate.cfg.QuerySettings" + } + stats { + summary = "Statistics Settings" + description = "Settings which control the collection of statistics" + settingsClassName "org.hibernate.cfg.StatisticsSettings" + } + validation { + summary = "Jakarta Validation Integeration Settings" + description = "Settings used in the integration of Jakarta Validation" + settingsClassName "org.hibernate.cfg.ValidationSettings" + } + envers { + summary = "Audit/History Settings" + description = "Settings which control Hibernate's audit/history support (hibernate-envers)" + settingsClassName 
"org.hibernate.envers.configuration.EnversSettings" + } + spatial { + summary = "Hibernate Spatial Settings" + description = "Settings which control Hibernate's support for spatial data (hibernate-spatial)" + settingsClassName "org.hibernate.spatial.HibernateSpatialConfigurationSettings" + settingsClassName "org.hibernate.spatial.integration.SpatialService" + } + misc { + summary = "Miscellaneous Settings" + description = "Miscellaneous Settings" + settingsClassName "org.hibernate.cfg.AvailableSettings" + } + } +} + +def generateSettingsDocTask = tasks.named( "generateSettingsDoc" ) { + dependsOn aggregateJavadocsTask + + doFirst { + def javadoc = aggregateJavadocsTask.get() + def javaLanguageVersion = javadoc.javadocTool.present + ? javadoc.javadocTool.get().metadata.languageVersion + : JavaLanguageVersion.of( JavaVersion.current().name ) + + if ( javaLanguageVersion.asInt() > 11 ) { + println "Aggregated Javadocs was built using a JDK newer than version 11; generating the settings User Guide fragment will not succeed" + } + } +} + +def renderUserGuideHtmlTask = tasks.register( 'renderUserGuideHtml', AsciidoctorTask ) { task -> + group = "Documentation" + description = 'Renders the User Guides in HTML format using Asciidoctor.' 
+ inputs.property "hibernate-version", project.ormVersion + inputs.file( generateSettingsDocTask.get().outputFile ) + + dependsOn generateSettingsDocTask + + sourceDir = file( 'src/main/asciidoc/userguide' ) + sources { + include 'Hibernate_User_Guide.adoc' + } + outputDir = "$buildDir/asciidoc/userguide/html_single" attributes linkcss: true, stylesheet: "css/hibernate.css", @@ -192,27 +605,34 @@ task renderUserGuide(type: AsciidoctorTask, group: 'Documentation') {task-> jpaJavadocUrlPrefix: "https://javaee.github.io/javaee-spec/javadocs/javax/persistence/" resources { - from('src/main/asciidoc/userguide/') { - include 'images/**' - } - from('src/main/style/asciidoctor') { + from( 'src/main/asciidoc/userguide/' ) { + include 'images/**' + } + from( 'src/main/style/asciidoctor' ) { include 'images/**' } - from('src/main/style/asciidoctor') { + from( 'src/main/style/asciidoctor' ) { include 'css/**' } - from('src/main/style/asciidoctor') { + from( 'src/main/style/asciidoctor' ) { include 'js/**' } - } + } } +def renderUserGuidesTask = tasks.register( 'renderUserGuides' ) { task -> + task.group = "Documentation" + task.description = 'Renders the User Guides in all formats.' + task.dependsOn( renderUserGuideHtmlTask ) +} + + // Integration Guide ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -task renderIntegrationGuide(type: AsciidoctorTask, group: 'Documentation') {task-> +def renderIntegrationGuideHtmlTask = tasks.register( "renderIntegrationGuideHtml", AsciidoctorTask ) { task-> + group = "Documentation" description = 'Renders the User Guides in HTML format using Asciidoctor.' 
- tasks.buildDocs.dependsOn task - tasks.buildDocsForPublishing.dependsOn task + inputs.property "hibernate-version", project.ormVersion sourceDir = file( 'src/main/asciidoc/integrationguide' ) sources { @@ -222,110 +642,159 @@ task renderIntegrationGuide(type: AsciidoctorTask, group: 'Documentation') {task outputDir = project.layout.buildDirectory.dir( 'asciidoc/integrationguide/html_single' ) attributes linkcss: true, - stylesheet: "css/hibernate.css" + stylesheet: "css/hibernate.css" resources { - from('src/main/asciidoc/integrationguide/') { + from( 'src/main/asciidoc/integrationguide/' ) { include 'images/**' } - from('src/main/style/asciidoctor') { + from( 'src/main/style/asciidoctor' ) { include 'images/**' } - from('src/main/style/asciidoctor') { + from( 'src/main/style/asciidoctor' ) { include 'css/**' } } } +def renderIntegrationGuidesTask = tasks.register( "renderIntegrationGuides" ) { task -> + group = "Documentation" + description = 'Renders all formats of the User Guide.' + task.dependsOn renderIntegrationGuideHtmlTask + + tasks.buildDocs.dependsOn task +} + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Migration Guide -task renderMigrationGuide(type: AsciidoctorTask, group: 'Documentation') {task-> - description = 'Renders the Migration Guide in HTML format using Asciidoctor.' +def migrationGuideSourceStagingDir = layout.buildDirectory.dir( "tmp/asciidoc/migration-guide" ) - tasks.buildDocs.dependsOn task - tasks.buildDocsForPublishing.dependsOn task +def copyMigrationGuideTask = tasks.register( "copyMigrationGuide", Copy ) {task -> + group = "Documentation" + description = "Copies migration-guide.adoc in preparation for rendering." 
- sourceDir = rootProject.layout.projectDirectory - sources { - include 'migration-guide.adoc' - } + inputs.property "hibernate-version", project.ormVersion + + from rootProject.layout.projectDirectory.file( "migration-guide.adoc" ) + into migrationGuideSourceStagingDir +} + +def renderMigrationGuideTask = tasks.register( "renderMigrationGuide", AsciidoctorTask ) { task -> + group = "Documentation" + description = "Renders the Migration Guide in HTML format using Asciidoctor." + + dependsOn copyMigrationGuideTask + inputs.property "hibernate-version", project.ormVersion + + sourceDir = migrationGuideSourceStagingDir outputDir = project.layout.buildDirectory.dir( 'asciidoc/migration-guide' ) attributes linkcss: true, - stylesheet: "css/hibernate.css" + stylesheet: "css/hibernate.css" resources { - from('src/main/style/asciidoctor') { + from( 'src/main/style/asciidoctor' ) { include 'images/**' } - from('src/main/style/asciidoctor') { + from( 'src/main/style/asciidoctor' ) { include 'css/**' } } } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // ORM Reports -task renderOrmReports { task -> - group 'Documentation' - description 'Grouping task for rendering all ORM reports' +def renderLoggingReportTask = tasks.register( 'renderLoggingReport', AsciidoctorTask ) { task -> + task.group = "hibernate-reports" + task.description = 'Renders the logging report (generated LoggingReportTask) by in HTML format using Asciidoctor.' 
+ task.dependsOn "generateLoggingReport" - dependsOn tasks.generateIncubationReport - dependsOn tasks.generateInternalsReport - dependsOn tasks.generateDeprecationReport + task.inputs.property "version", project.ormVersion - tasks.buildDocs.dependsOn task - tasks.buildDocsForPublishing.dependsOn task -} + task.sourceDir = layout.buildDirectory.dir( 'orm/generated/logging' ) + task.sources 'logging.adoc' -task renderLoggingReport(type: AsciidoctorTask, group: 'Documentation') { task -> - group 'Documentation' - description = 'Renders the ORM logging report in HTML format using Asciidoctor.' + task.outputDir = project.layout.buildDirectory.dir( 'asciidoc/logging' ) - dependsOn tasks.generateLoggingReport + task.attributes linkcss: true, + stylesheet: "css/hibernate.css" - tasks.renderOrmReports.dependsOn task - - sourceDir = layout.buildDirectory.dir( 'orm/reports' ) - sources { - include 'logging.adoc' + task.resources { + from( 'src/main/style/asciidoctor' ) { + include 'images/**' + } + from( 'src/main/style/asciidoctor' ) { + include 'css/**' + } } +} - outputDir = project.layout.buildDirectory.dir( 'asciidoc/logging' ) +def renderDialectReportTask = tasks.register( 'renderDialectReport', AsciidoctorTask ) { task -> + task.group = "hibernate-reports" + task.description = 'Renders the supported Dialect report in HTML format using Asciidoctor.' 
+ task.dependsOn "generateDialectReport" - attributes linkcss: true, - stylesheet: "css/hibernate.css" + task.inputs.property "version", project.ormVersion - resources { - from('src/main/style/asciidoctor') { + task.sourceDir = layout.buildDirectory.dir( 'orm/generated/dialect' ) + task.sources 'dialect.adoc' + + task.outputDir = project.layout.buildDirectory.dir( 'asciidoc/dialect' ) + + task.attributes linkcss: true, + stylesheet: "css/hibernate.css" + + task.resources { + from( 'src/main/style/asciidoctor' ) { include 'images/**' } - from('src/main/style/asciidoctor') { + from( 'src/main/style/asciidoctor' ) { include 'css/**' } } } +def generateReportsTask = tasks.named( "generateReports" ) { + dependsOn renderLoggingReportTask, renderDialectReportTask +} + + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Overall grouping tasks +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +def buildDocsTask = tasks.register( 'buildDocs' ) { task -> + task.group = 'Documentation' + task.description = 'Grouping task for performing all documentation building tasks' + task.dependsOn aggregateJavadocsTask + task.dependsOn renderGettingStartedGuidesTask + task.dependsOn renderIntroductionGuidesTask + task.dependsOn renderUserGuidesTask + task.dependsOn renderQueryLanguageGuidesTask + task.dependsOn renderIntegrationGuidesTask + task.dependsOn renderTopicalGuidesTask + task.dependsOn generateReportsTask + task.dependsOn renderMigrationGuideTask +} + +//noinspection GroovyUnusedAssignment +def buildDocsForPublishingTask = tasks.register( 'buildDocsForPublishing' ) { task -> + task.group = 'Documentation' + task.description = 'Grouping task for building all documentation for publishing (release)' + task.dependsOn buildDocsTask +} // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
-tasks.withType(AsciidoctorTask).all { +tasks.withType(AsciidoctorTask).configureEach { baseDirFollowsSourceDir() outputOptions { separateOutputDirs = false backends 'html5' } } - -// resources inherently exclude sources -sourceSets.test.resources { - setSrcDirs( ['src/test/java','src/test/resources'] ) -} - - -checkstyleMain.exclude '**/org/hibernate/userguide/model/*' - diff --git a/documentation/src/main/asciidoc/integrationguide/Hibernate_Integration_Guide.adoc b/documentation/src/main/asciidoc/integrationguide/Hibernate_Integration_Guide.adoc index 7450ba37ade4..1336a737f937 100644 --- a/documentation/src/main/asciidoc/integrationguide/Hibernate_Integration_Guide.adoc +++ b/documentation/src/main/asciidoc/integrationguide/Hibernate_Integration_Guide.adoc @@ -1,11 +1,12 @@ -= Hibernate ORM {majorMinorVersion} Integration Guide -Steve Ebersole, Vlad Mihalcea += Hibernate ORM Integration Guide :toc: :toclevels: 3 -include::Preface.adoc[] +include::preface.adoc[] :numbered: include::chapters/services/Services.adoc[] +include::credits.adoc[] + diff --git a/documentation/src/main/asciidoc/integrationguide/Preface.adoc b/documentation/src/main/asciidoc/integrationguide/Preface.adoc deleted file mode 100644 index d348dc231a36..000000000000 --- a/documentation/src/main/asciidoc/integrationguide/Preface.adoc +++ /dev/null @@ -1,13 +0,0 @@ -[[preface]] -== Preface - -Hibernate is an https://en.wikipedia.org/wiki/Object-relational_mapping[Object/Relational Mapping] solution for Java environments. - -Hibernate not only takes care of the mapping from Java classes to database tables (and from Java data types to SQL data types), but also provides data query and retrieval facilities. -It can significantly reduce development time otherwise spent with manual data handling in SQL and JDBC. 
-Hibernate’s design goal is to relieve the developer from 95% of common data persistence-related programming tasks by eliminating the need for manual, hand-crafted data processing using SQL and JDBC. -However, unlike many other persistence solutions, Hibernate does not hide the power of SQL from you and guarantees that your investment in relational technology and knowledge is as valid as always. - -=== Audience - -This guide is for software developers and architects who will be integrating Hibernate with Java EE application servers, Spring framework, caching solutions (e.g. Infinispan, Ehcache, Hazelcast). diff --git a/documentation/src/main/asciidoc/integrationguide/credits.adoc b/documentation/src/main/asciidoc/integrationguide/credits.adoc new file mode 100644 index 000000000000..60f1fd6b8f0b --- /dev/null +++ b/documentation/src/main/asciidoc/integrationguide/credits.adoc @@ -0,0 +1,10 @@ +[[credits]] +== Credits + +The full list of contributors to Hibernate ORM can be found on the +https://github.com/hibernate/hibernate-orm/graphs/contributors[GitHub repository]. + +The following contributors were involved in this documentation: + +* Steve Ebersole +* Vlad Mihalcea \ No newline at end of file diff --git a/documentation/src/main/asciidoc/integrationguide/preface.adoc b/documentation/src/main/asciidoc/integrationguide/preface.adoc new file mode 100644 index 000000000000..924423496ee4 --- /dev/null +++ b/documentation/src/main/asciidoc/integrationguide/preface.adoc @@ -0,0 +1,8 @@ +[[preface]] +== Preface + +Hibernate provides many integration points that allow integrating new capabilities or supplying custom behavior for standard capabilities. This guide covers these integration points and is intended for software developers and architects who will be + +- integrating Hibernate with Java EE application servers, Spring framework, caching solutions (e.g. Infinispan, Ehcache, Hazelcast). 
+- supplying custom integrations +- want to override standard capabilities \ No newline at end of file diff --git a/documentation/src/main/asciidoc/introduction/Advanced.adoc b/documentation/src/main/asciidoc/introduction/Advanced.adoc new file mode 100644 index 000000000000..373285e66018 --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Advanced.adoc @@ -0,0 +1,995 @@ +[[advanced]] +== Advanced Topics + +In the last chapter of this Introduction, we turn to some topics that don't really belong in an introduction. +Here we consider some problems, and solutions, that you're probably not going to run into immediately if you're new to Hibernate. +But we do want you to know _about_ them, so that when the time comes, you'll know what tool to reach for. + +[[filters]] +=== Filters + +_Filters_ are one of the nicest and under-usedest features of Hibernate, and we're quite proud of them. +A filter is a named, globally-defined, parameterized restriction on the data that is visible in a given session. + +Examples of well-defined filters might include: + +- a filter that restricts the data visible to a given user according to row-level permissions, +- a filter which hides data which has been soft-deleted, +- in a versioned database, a filter that displays versions which were current at a given instant in the past, or +- a filter that restricts to data associated with a certain geographical region. + +A filter must be declared somewhere. +A package descriptor is as good a place as any: + +[source,java] +---- +@FilterDef(name = "ByRegion", + parameters = @ParamDef(name = "region", type = String.class)) +package org.hibernate.example; +---- + +This filter has one parameter. +Fancier filters might in principle have multiple parameters, though we admit this must be quite rare. 
+ +[IMPORTANT] +==== +If you add annotations to a package descriptor, and you're using `Configuration` to configure Hibernate, make sure you call `Configuration.addPackage()` to let Hibernate know that the package descriptor is annotated. +==== + +_Typically_, but not necessarily, a `@FilterDef` specifies a default restriction: + +[source,java] +---- +@FilterDef(name = "ByRegion", + parameters = @ParamDef(name = "region", type = String.class), + defaultCondition = "region = :region") +package org.hibernate.example; +---- + +The restriction must contain a reference to the parameter of the filter, specified using the usual syntax for named parameters. + +Any entity or collection which is affected by a filter must be annotated `@Filter`: + +[source,java] +---- +@Entity +@Filter(name = example_.BY_REGION) +class User { + + @Id String username; + + String region; + + ... +} +---- + +If the `@Filter` annotation does not explicitly specify a restriction, the default restriction given by the `@FilterDef` will be applied to the entity. +But an entity is free to override the default condition. + +[source,java] +---- +@Entity +@Filter(name = "ByRegion", condition = "name = :region") +class Region { + + @Id String name; + + ... +} +---- + +Note that the restriction specified by the `condition` or `defaultCondition` is a native SQL expression. + +.Annotations for defining filters +[%breakable,cols="25,~"] +|=== +| Annotation | Purpose + +| `@FilterDef` | Defines a filter and declares its name (exactly one per filter) +| `@Filter` | Specifies how a filter applies to a given entity or collection (many per filter) +|=== + +By default, a new session comes with every filter disabled. +A filter may be explicitly enabled in a given session by calling `enableFilter()` and assigning arguments to the parameters of the filter. +You should do this right at the _start_ of the session. 
+ +[source,java] +---- +sessionFactory.inTransaction(session -> { + session.enableFilter("ByRegion") + .setParameter("region", "es") + .validate(); + + ... +}); +---- + +Now, any queries executed within the session will have the filter restriction applied. +Collections annotated `@Filter` will also have their members correctly filtered. + +[CAUTION] +==== +On the other hand, filters are not applied to `@ManyToOne` associations, nor to `find()`. +This is completely by design and is not in any way a bug. +==== + +More than one filter may be enabled in a given session. + +:envers: https://hibernate.org/orm/envers/ +:envers-doc: {doc-user-guide-url}#envers + +We've mentioned that a filter can be used to implement versioning, and to provide _historical_ views of the data. +Being such a general-purpose construct, filters provide a lot of flexibility here. +But if you're after a more focused/opinionated solution to this problem, you should definitely check out {envers}[Envers]. + +[[envers]] +.Using Envers for auditing historical data +**** +Envers is an add-on to Hibernate ORM which keeps a historical record of each versioned entity in a separate _audit table_, and allows past revisions of the data to be viewed and queried. +A full introduction to Envers would require a whole chapter, so we'll just give you a quick taste here. + +First, we must mark an entity as versioned, using the `@Audited` annotation: +[source,java] +---- +@Audited @Entity +@Table(name="CurrentDocument") +@AuditTable("DocumentRevision") +class Document { ... } +---- +[TIP] +==== +The `@AuditTable` annotation is optional, and it's better to set either `org.hibernate.envers.audit_table_prefix` or `org.hibernate.envers.audit_table_suffix` and let the audit table name be inferred. +==== +The `AuditReader` interface exposes operations for retrieving and querying historical revisions. 
+It's really easy to get hold of one of these:
+[source,java]
+----
+AuditReader reader = AuditReaderFactory.get(entityManager);
+----
+Envers tracks revisions of the data via a global _revision number_.
+We may easily find the revision number which was current at a given instant:
+[source,java]
+----
+Number revision = reader.getRevisionNumberForDate(datetime);
+----
+We can use the revision number to ask for the version of our entity associated with the given revision number:
+[source,java]
+----
+Document doc = reader.find(Document.class, id, revision);
+----
+Alternatively, we can directly ask for the version which was current at a given instant:
+[source,java]
+----
+Document doc = reader.find(Document.class, id, datetime);
+----
+We can even execute queries to obtain lists of entities current at the given revision number:
+[source,java]
+----
+List<Document> documents =
+        reader.createQuery()
+            .forEntitiesAtRevision(Document.class, revision)
+            .getResultList();
+----
+For much more information, see {envers-doc}[the User Guide].
+****
+
+Another closely-related problem is multi-tenancy.
+
+[[multitenancy]]
+=== Multi-tenancy
+
+A _multi-tenant_ database is one where the data is segregated by _tenant_.
+We don't need to actually define what a "tenant" really represents here; all we care about at this level of abstraction is that each tenant may be distinguished by a unique identifier.
+And that there's a well-defined _current tenant_ in each session.
+ +We may specify the current tenant when we open a session: + +[source,java] +---- +var session = + sessionFactory.withOptions() + .tenantIdentifier(tenantId) + .openSession(); +---- + +Or, when using JPA-standard APIs: + +[source,java] +---- +var entityManager = + entityManagerFactory.createEntityManager(Map.of(HibernateHints.HINT_TENANT_ID, tenantId)); +---- + +However, since we often don't have this level of control over creation of the session, it's more common to supply an implementation of `CurrentTenantIdentifierResolver` to Hibernate. + +There are three common ways to implement multi-tenancy: + +1. each tenant has its own database, +2. each tenant has its own schema, or +3. tenants share tables in a single schema, and rows are tagged with the tenant id. + +From the point of view of Hibernate, there's little difference between the first two options. +Hibernate will need to obtain a JDBC connection with permissions on the database and schema owned by the current tenant. + +Therefore, we must implement a `MultiTenantConnectionProvider` which takes on this responsibility: + +- from time to time, Hibernate will ask for a connection, passing the id of the current tenant, and then we must create an appropriate connection or obtain one from a pool, and return it to Hibernate, and +- later, Hibernate will release the connection and ask us to destroy it or return it to the appropriate pool. + +[TIP] +==== +Check out `DataSourceBasedMultiTenantConnectionProviderImpl` for inspiration. +==== + +The third option is quite different. +In this case we don't need a `MultiTenantConnectionProvider`, but we will need a dedicated column holding the tenant id mapped by each of our entities. + +[source,java] +---- +@Entity +class Account { + @Id String id; + @TenantId String tenantId; + + ... +} +---- + +The `@TenantId` annotation is used to indicate an attribute of an entity which holds the tenant id. 
+Within a given session, our data is automatically filtered so that only rows tagged with the tenant id of the current tenant are visible in that session. + +[CAUTION] +==== +Native SQL queries are _not_ automatically filtered by tenant id; you'll have to do that part yourself. +==== + +.Multi-tenancy configuration +[%breakable,cols="36,~"] +|=== +| Configuration property name | Purpose + +| `hibernate.tenant_identifier_resolver` | Specifies the `CurrentTenantIdentifierResolver` +| `hibernate.multi_tenant_connection_provider` | Specifies the `MultiTenantConnectionProvider` +|=== + +[TIP] +==== +If you only need to filter rows by a static condition with no parameters, `@Where` is a much simpler way to do that. +==== + +[[custom-sql]] +=== Using custom-written SQL + +We've already discussed how to run <>, but occasionally that's not enough. +Sometimes—but much less often than you might expect—we would like to customize the SQL used by Hibernate to perform basic CRUD operations for an entity or collection. + +For this we can use `@SQLInsert` and friends: + +[source,java] +---- +@Entity +@SQLInsert(sql = "insert into person (name, id, valid) values (?, ?, true)", check = COUNT) +@SQLUpdate(sql = "update person set name = ? where id = ?") +@SQLDelete(sql = "update person set valid = false where id = ?") +@SQLSelect(sql = "select id, name from person where id = ? and valid = true") +public static class Person { ... } +---- + +[TIP] +==== +If the custom SQL should be executed via a `CallableStatement`, just specify `callable=true`. +==== + +Any SQL statement specified by one of these annotations must have exactly the number of JDBC parameters that Hibernate expects, that is, one for each column mapped by the entity, in the exact order Hibernate expects. In particular, the primary key columns must come last. 
+ +However, the `@Column` annotation does lend some flexibility here: + +- if a column should not be written as part of the custom `insert` statement, and has no corresponding JDBC parameter in the custom SQL, map it `@Column(insertable=false)`, or +- if a column should not be written as part of the custom `update` statement, and has no corresponding JDBC parameter in the custom SQL, map it `@Column(updatable=false)`. + +[TIP] +==== +If you need custom SQL, but are targeting multiple dialects of SQL, you can use the annotations defined in `DialectOverrides`. +For example, this annotation lets us override the custom `insert` statement just for PostgreSQL: + +[source,java] +---- +@DialectOverride.SQLInsert(dialect = PostgreSQLDialect.class, + override = @SQLInsert(sql="insert into person (name,id) values (?,gen_random_uuid())")) +---- +It's even possible to override the custom SQL for specific _versions_ of a database. +==== + +Sometimes a custom `insert` or `update` statement assigns a value to a mapped column which is calculated when the statement is executed on the database. +For example, the value might be obtained by calling a SQL function: + +[source,java] +---- +@SQLInsert(sql = "insert into person (name, id) values (?, gen_random_uuid())") +---- + +But the entity instance which represents the row being inserted or updated won't be automatically populated with that value. +And so our persistence context loses synchronization with the database. +In situations like this, we may use the `@Generated` annotation to tell Hibernate to reread the state of the entity after each `insert` or `update`. + +[[database-generated-columns]] +=== Handling database-generated columns + +Sometimes, a column value is assigned or mutated by events that happen in the database, and aren't visible to Hibernate. 
+For example:
+
+- a table might have a column value populated by a trigger,
+- a mapped column might have a default value defined in DDL, or
+- a custom SQL `insert` or `update` statement might assign a value to a mapped column, as we saw in the previous subsection.
+
+One way to deal with this situation is to explicitly call `refresh()` at appropriate moments, forcing the session to reread the state of the entity.
+But this is annoying.
+
+The `@Generated` annotation relieves us of the burden of explicitly calling `refresh()`.
+It specifies that the value of the annotated entity attribute is generated by the database, and that the generated value should be automatically retrieved using a SQL `returning` clause, or separate `select` after it is generated.
+
+A useful example is the following mapping:
+
+[source,java]
+----
+@Entity
+class Entity {
+    @Generated @Id
+    @ColumnDefault("gen_random_uuid()")
+    UUID id;
+}
+----
+
+The generated DDL is:
+
+[source,sql]
+----
+create table Entity (
+    id uuid default gen_random_uuid() not null,
+    primary key (id)
+)
+----
+
+So here the value of `id` is defined by the column default clause, by calling the PostgreSQL function `gen_random_uuid()`.
+
+When a column value is generated during updates, use `@Generated(event=UPDATE)`.
+When a value is generated by both inserts _and_ updates, use `@Generated(event={INSERT,UPDATE})`.
+
+[TIP]
+====
+For columns which should be generated using a SQL `generated always as` clause, prefer the `@GeneratedColumn` annotation, so that Hibernate automatically generates the correct DDL.
+====
+
+Actually, the `@Generated` and `@GeneratedColumn` annotations are defined in terms of a more generic and user-extensible framework for handling attribute values generated in Java, or by the database.
+ +[[user-defined-generators]] +=== User-defined generators + +JPA doesn't define a standard way to extend the set of id generation strategies, but Hibernate does: + +- the `Generator` hierarchy of interfaces in the package `org.hibernate.generator` lets you define new generators, and +- the `@IdGeneratorType` meta-annotation from the package `org.hibernate.annotations` lets you write an annotation which associates a `Generator` type with identifier attributes. + +Furthermore, the `@ValueGenerationType` meta-annotation lets you write an annotation which associates a `Generator` type with a non-`@Id` attribute. + +[NOTE] +// .The older APIs are still available in Hibernate 6 +==== +These APIs are new in Hibernate 6, and supersede the classic `IdentifierGenerator` interface and `@GenericGenerator` annotation from older versions of Hibernate. +However, the older APIs are still available and custom ``IdentifierGenerator``s written for older versions of Hibernate continue to work in Hibernate 6. +==== + +Hibernate has a range of built-in generators which are defined in terms of this new framework. 
+ +.Built-in generators +[%breakable,cols="20,25,~"] +|=== +| Annotation | Implementation | Purpose + +| `@Generated` | `GeneratedGeneration` | Generically handles database-generated values +| `@GeneratedColumn` | `GeneratedAlwaysGeneration` | Handles values generated using `generated always` +| `@CurrentTimestamp` | `CurrentTimestampGeneration` | Generic support for database or in-memory generation of creation or update timestamps +| `@CreationTimestamp` | `CurrentTimestampGeneration` | A timestamp generated when an entity is first made persistent +| `@UpdateTimestamp` | `CurrentTimestampGeneration` | A timestamp generated when an entity is made persistent, and regenerated every time the entity is modified +| `@UuidGenerator` | `UuidGenerator` | A more flexible generator for RFC 4122 UUIDs +|=== + +Furthermore, support for JPA's standard id generation strategies is also defined in terms of this framework. + +As an example, let's look at how `@UuidGenerator` is defined: + +[source,java] +---- +@IdGeneratorType(org.hibernate.id.uuid.UuidGenerator.class) +@ValueGenerationType(generatedBy = org.hibernate.id.uuid.UuidGenerator.class) +@Retention(RUNTIME) +@Target({ FIELD, METHOD }) +public @interface UuidGenerator { ... } +---- + +`@UuidGenerator` is meta-annotated both `@IdGeneratorType` and `@ValueGenerationType` because it may be used to generate both ids and values of regular attributes. 
+Either way, this `Generator` class does the hard work:
+
+[source,java]
+----
+public class UuidGenerator
+        // this generator produces values before SQL is executed
+        implements BeforeExecutionGenerator {
+
+    // constructors accept an instance of the @UuidGenerator
+    // annotation, allowing the generator to be "configured"
+
+    // called to create an id generator
+    public UuidGenerator(
+            org.hibernate.annotations.UuidGenerator config,
+            Member idMember,
+            CustomIdGeneratorCreationContext creationContext) {
+        this(config, idMember);
+    }
+
+    // called to create a generator for a regular attribute
+    public UuidGenerator(
+            org.hibernate.annotations.UuidGenerator config,
+            Member member,
+            GeneratorCreationContext creationContext) {
+        this(config, member);
+    }
+
+    ...
+
+    @Override
+    public EnumSet<EventType> getEventTypes() {
+        // UUIDs are only assigned on insert, and never regenerated
+        return INSERT_ONLY;
+    }
+
+    @Override
+    public Object generate(SharedSessionContractImplementor session, Object owner, Object currentValue, EventType eventType) {
+        // actually generate a UUID and transform it to the required type
+        return valueTransformer.transform( generator.generateUuid( session ) );
+    }
+}
+----
+
+You can find out more about custom generators from the Javadoc for `@IdGeneratorType` and for `org.hibernate.generator`.
+
+
+[[naming-strategies]]
+=== Naming strategies
+
+When working with a pre-existing relational schema, it's usual to find that the column and table naming conventions used in the schema don't match Java's naming conventions.
+
+Of course, the `@Table` and `@Column` annotations let us explicitly specify a mapped table or column name.
+But we would prefer to avoid scattering these annotations across our whole domain model.
+
+Therefore, Hibernate lets us define a mapping between Java naming conventions, and the naming conventions of the relational schema.
+Such a mapping is called a _naming strategy_.
+ +First, we need to understand how Hibernate assigns and processes names. + +- _Logical naming_ is the process of applying naming rules to determine the _logical names_ of objects which were not explicitly assigned names in the O/R mapping. + That is, when there's no `@Table` or `@Column` annotation. +- _Physical naming_ is the process of applying additional rules to transform a logical name into an actual "physical" name that will be used in the database. + For example, the rules might include things like using standardized abbreviations, or trimming the length of identifiers. + +Thus, there's two flavors of naming strategy, with slightly different responsibilities. +Hibernate comes with default implementations of these interfaces: + + +|=== +| Flavor | Default implementation + +| An `ImplicitNamingStrategy` is responsible for assigning a logical name when none is specified by an annotation +| A default strategy which implements the rules defined by JPA +| A `PhysicalNamingStrategy` is responsible for transforming a logical name and producing the name used in the database +| A trivial implementation which does no processing +|=== + +[TIP] +==== +We happen to not much like the naming rules defined by JPA, which specify that mixed case and camel case identifiers should be concatenated using underscores. +We bet you could easily come up with a much better `ImplicitNamingStrategy` than that! +(Hint: it should always produce legit mixed case identifiers.) +==== +[TIP] +==== +A popular `PhysicalNamingStrategy` produces snake case identifiers. +==== + +Custom naming strategies may be enabled using the configuration properties we already mentioned without much explanation back in <>. 
+ +.Naming strategy configuration +[%breakable,cols="35,~"] +|=== +| Configuration property name | Purpose + +| `hibernate.implicit_naming_strategy` | Specifies the `ImplicitNamingStrategy` +| `hibernate.physical_naming_strategy` | Specifies the `PhysicalNamingStrategy` +|=== + +[[spatial]] +=== Spatial datatypes + +:ogc: https://www.ogc.org +:geolatte: https://github.com/GeoLatte/geolatte-geom + +Hibernate Spatial augments the <> with a set of Java mappings for {ogc}[OGC] spatial types. + +- {geolatte}[Geolatte-geom] defines a set of Java types implementing the OGC spatial types, and codecs for translating to and from database-native spatial datatypes. +- Hibernate Spatial itself supplies integration with Hibernate. + +To use Hibernate Spatial, we must add it as a dependency, as described in <>. + +Then we may immediately use Geolatte-geom and JTS types in our entities. +No special annotations are needed: + +[source,java] +---- +import org.locationtech.jts.geom.Point; +import jakarta.persistence.*; + +@Entity +class Event { + Event() {} + + Event(String name, Point location) { + this.name = name; + this.location = location; + } + + @Id @GeneratedValue + Long id; + + String name; + + Point location; + +} +---- + +The generated DDL uses `geometry` as the type of the column mapped by `location`: + +[source,sql] +---- +create table Event ( + id bigint not null, + location geometry, + name varchar(255), + primary key (id) +) +---- + +Hibernate Spatial lets us work with spatial types just as we would with any of the built-in basic attribute types. + +[source,java] +---- +var geometryFactory = new GeometryFactory(); +... 
+ +Point point = geometryFactory.createPoint(new Coordinate(10, 5)); +session.persist(new Event("Hibernate ORM presentation", point)); +---- + +But what makes this powerful is that we may write some very fancy queries involving functions of spatial types: + +[source,java] +---- +Polygon triangle = + geometryFactory.createPolygon( + new Coordinate[] { + new Coordinate(9, 4), + new Coordinate(11, 4), + new Coordinate(11, 20), + new Coordinate(9, 4) + } + ); +Point event = + session.createQuery("select location from Event where within(location, :zone) = true", Point.class) + .setParameter("zone", triangle) + .getSingleResult(); +---- + +:matrix: {doc-user-guide-url}#spatial-configuration-dialect-features + +Here, `within()` is one of the functions for testing spatial relations defined by the OpenGIS specification. +Other such functions include `touches()`, `intersects()`, `distance()`, `boundary()`, etc. +Not every spatial relation function is supported on every database. +A matrix of support for spatial relation functions may be found in the {matrix}[User Guide]. + +[TIP] +==== +If you want to play with spatial functions on H2, run the following code first: + +[source,java] +---- +sessionFactory.inTransaction(session -> { + session.doWork(connection -> { + try (var statement = connection.createStatement()) { + statement.execute("create alias if not exists h2gis_spatial for \"org.h2gis.functions.factory.H2GISFunctions.load\""); + statement.execute("call h2gis_spatial()"); + } + }); +} ); +---- +==== + +[[ordered-sorted]] +=== Ordered and sorted collections and map keys + +Java lists and maps don't map very naturally to foreign key relationships between tables, and so we tend to avoid using them to represent associations between our entity classes. +But if you feel like you _really_ need a collection with a fancier structure than `Set`, Hibernate does have options. 
+ +The first two options let us map the index of a `List` or key of a `Map` to a column: + +.Annotations for mapping lists and maps +[%breakable,cols="22,~,^13"] +|=== +| Annotation | Purpose | JPA-standard + +| `@OrderColumn` | Specifies the column used to maintain the order of a list | ✔ +| `@MapKeyColumn` | Specifies the column used to persist the keys of a map | ✔ +|=== + +For an unowned association, the column must also be mapped on the owning side, usually by an attribute of the target entity. + +Now, let's introduce a little distinction: + +- an _ordered collection_ is one with an ordering maintained in the database, and +- a _sorted collection_ is one which is sorted in Java code. + +These annotations allow us to specify how the elements of a collection should be ordered as they are read from the database: + +.Annotations for ordered collections +[%breakable,cols="22,~,^13"] +|=== +| Annotation | Purpose | JPA-standard + +| `@OrderBy` | Specifies a fragment of JPQL used to order the collection | ✔ +|=== + +On the other hand, the following annotation specify how a collection should be sorted in memory, and are used for collections of type `SortedSet` or `SortedMap`: + +.Annotations for sorted collections +[%breakable,cols="22,~,^13"] +|=== +| Annotation | Purpose | JPA-standard + +| `@SortNatural` | Specifies that the elements of a collection are `Comparable` | ✖ +| `@SortComparator` | Specifies a `Comparator` used to sort the collection | ✖ +|=== + +Under the covers, Hibernate uses a `TreeSet` or `TreeMap` to maintain the collection in sorted order. + +[[any]] +=== Any mappings + +An `@Any` mapping is a sort of polymorphic many-to-one association where the target entity types are not related by the usual entity inheritance. +The target type is distinguished using a discriminator value stored on the _referring_ side of the relationship. + +This is quite different to <> where the discriminator is held in the tables mapped by the referenced entity hierarchy. 
+
+For example, consider an `Order` entity containing `Payment` information, where a `Payment` might be a `CashPayment` or a `CreditCardPayment`:
+
+[source,java]
+----
+interface Payment { ... }
+
+@Entity
+class CashPayment { ... }
+
+@Entity
+class CreditCardPayment { ... }
+----
+
+In this example, `Payment` is not declared as an entity type, and is not annotated `@Entity`. It might even be an interface, or at most just a mapped superclass, of `CashPayment` and `CreditCardPayment`. So in terms of the object/relational mappings, `CashPayment` and `CreditCardPayment` would not be considered to participate in the same entity inheritance hierarchy.
+
+On the other hand, `CashPayment` and `CreditCardPayment` do have the same identifier type.
+This is important.
+
+
+An `@Any` mapping would store the discriminator value identifying the concrete type of `Payment` along with the state of the associated `Order`, instead of storing it in the table mapped by `Payment`.
+
+[source,java]
+----
+@Entity
+class Order {
+    ...
+
+    @Any
+    @AnyKeyJavaClass(UUID.class)   //the foreign key type
+    @JoinColumn(name="payment_id") // the foreign key column
+    @Column(name="payment_type")   // the discriminator column
+    // map from discriminator values to target entity types
+    @AnyDiscriminatorValue(discriminator="CASH", entity=CashPayment.class)
+    @AnyDiscriminatorValue(discriminator="CREDIT", entity=CreditCardPayment.class)
+    Payment payment;
+
+    ...
+}
+----
+
+It's reasonable to think of the "foreign key" in an `@Any` mapping as a composite value made up of the foreign key and discriminator taken together. Note, however, that this composite foreign key is only conceptual and cannot be declared as a physical constraint on the relational database table.
+
+There are a number of annotations which are useful to express this sort of complicated and unnatural mapping:
+
+.Annotations for `@Any` mappings
+|===
+| Annotations | Purpose
+
+| `@Any` | Declares that an attribute is a discriminated polymorphic association mapping
+| `@AnyDiscriminator` | Specify the Java type of the discriminator
+| `@JdbcType` or `@JdbcTypeCode` | Specify the JDBC type of the discriminator
+| `@AnyDiscriminatorValue` | Specifies how discriminator values map to entity types
+| `@Column` or `@Formula` | Specify the column or formula in which the discriminator value is stored
+| `@AnyKeyJavaType` or `@AnyKeyJavaClass` | Specify the Java type of the foreign key (that is, of the ids of the target entities)
+| `@AnyKeyJdbcType` or `@AnyKeyJdbcTypeCode` | Specify the JDBC type of the foreign key
+| `@JoinColumn` | Specifies the foreign key column
+|===
+
+Of course, `@Any` mappings are disfavored, except in extremely special cases, since it's much more difficult to enforce referential integrity at the database level.
+
+There's also currently some limitations around querying `@Any` associations in HQL.
+This is allowed:
+
+[source,hql]
+----
+from Order ord
+    join CashPayment cash
+        on id(ord.payment) = cash.id
+----
+
+[CAUTION]
+====
+Polymorphic association joins for `@Any` mappings are not currently implemented.
+====
+
+[[dynamic-insert-update]]
+=== Selective column lists in inserts and updates
+
+By default, Hibernate generates `insert` and `update` statements for each entity during bootstrap, and reuses the same `insert` statement every time an instance of the entity is made persistent, and the same `update` statement every time an instance of the entity is modified.
+ +This means that: + +- if an attribute is `null` when the entity is made persistent, its mapped column is redundantly included in the SQL `insert`, and +- worse, if a certain attribute is unmodified when other attributes are changed, the column mapped by that attribute is redundantly included in the SQL `update`. + +Most of the time this just isn't an issue worth worrying about. +The cost of interacting with the database is _usually_ dominated by the cost of a round trip, not by the number of columns in the `insert` or `update`. +But in cases where it does become important, there are two ways to be more selective about which columns are included in the SQL. + +The JPA-standard way is to indicate statically which columns are eligible for inclusion via the `@Column` annotation. +For example, if an entity is always created with an immutable `creationDate`, and with no `completionDate`, then we would write: + +[source,java] +---- +@Column(updatable=false) LocalDate creationDate; +@Column(insertable=false) LocalDate completionDate; +---- + +This approach works quite well in many cases, but often breaks down for entities with more than a handful of updatable columns. + +An alternative solution is to ask Hibernate to generate SQL dynamically each time an `insert` or `update` is executed. +We do this by annotating the entity class. + +.Annotations for dynamic SQL generation +[%breakable,cols="25,~"] +|=== +| Annotation | Purpose + +| `@DynamicInsert` | Specifies that an `insert` statement should be generated each time an entity is made persistent +| `@DynamicUpdate` | Specifies that an `update` statement should be generated each time an entity is modified +|=== + +It's important to realize that, while `@DynamicInsert` has no impact on semantics, the more useful `@DynamicUpdate` annotation _does_ have a subtle side effect. 
+ +[CAUTION] +==== +The wrinkle is that if an entity has no version property, `@DynamicUpdate` opens the possibility of two optimistic transactions concurrently reading and selectively updating a given instance of the entity. +In principle, this might lead to a row with inconsistent column values after both optimistic transactions commit successfully. +==== + +Of course, this consideration doesn't arise for entities with a `@Version` attribute. + +[TIP] +==== +But there's a solution! +Well-designed relational schemas should have _constraints_ to ensure data integrity. +That's true no matter what measures we take to preserve integrity in our program logic. +We may ask Hibernate to add a `check` constraint to our table using the `@Check` annotation. +Check constraints and foreign key constraints can help ensure that a row never contains inconsistent column values. +==== + +[[bytecode-enhancer]] +=== Using the bytecode enhancer + +:enhancer: {doc-user-guide-url}#BytecodeEnhancement + +Hibernate's {enhancer}[bytecode enhancer] enables the following features: + +- _attribute-level lazy fetching_ for basic attributes annotated `@Basic(fetch=LAZY)` and for lazy non-polymorphic associations, +- _interception-based_—instead of the usual _snapshot-based_—detection of modifications. + +To use the bytecode enhancer, we must add the Hibernate plugin to our gradle build: + +[source,groovy] +---- +plugins { + id "org.hibernate.orm" version "6.2.2.Final" +} + +hibernate { enhancement } +---- + +// [discrete] +// ==== Attribute-level lazy fetching + +Consider this field: + +[source,java] +---- +@Entity +class Book { + ... + + @Basic(optional = false, fetch = LAZY) + @Column(length = LONG32) + String fullText; + + ... +} +---- + +The `fullText` field maps to a `clob` or `text` column, depending on the SQL dialect. +Since it's expensive to retrieve the full book-length text, we've mapped the field `fetch=LAZY`, telling Hibernate not to read the field until it's actually used. 
+ +- _Without_ the bytecode enhancer, this instruction is ignored, and the field is always fetched immediately, as part of the initial `select` that retrieves the `Book` entity. +- _With_ bytecode enhancement, Hibernate is able to detect access to the field, and lazy fetching is possible. + +[TIP] +==== +By default, Hibernate fetches all lazy fields of a given entity at once, in a single `select`, when any one of them is accessed. +Using the `@LazyGroup` annotation, it's possible to assign fields to distinct "fetch groups", so that different lazy fields may be fetched independently. +==== + +Similarly, interception lets us implement lazy fetching for non-polymorphic associations without the need for a separate proxy object. +However, if an association is polymorphic, that is, if the target entity type has subclasses, then a proxy is still required. + +// [discrete] +// ==== Interception-based change detection + +Interception-based change detection is a nice performance optimization with a slight cost in terms of correctness. + +- _Without_ the bytecode enhancer, Hibernate keeps a snapshot of the state of each entity after reading from or writing to the database. +When the session flushes, the snapshot state is compared to the current state of the entity to determine if the entity has been modified. +Maintaining these snapshots does have an impact on performance. +- _With_ bytecode enhancement, we may avoid this cost by intercepting writes to the field and recording these modifications as they happen. + +This optimization isn't _completely_ transparent, however. + +[CAUTION] +==== +Interception-based change detection is less accurate than snapshot-based dirty checking. +For example, consider this attribute: + +[source,java] +byte[] image; + +Interception is able to detect writes to the `image` field, that is, replacement of the whole array. +It's not able to detect modifications made directly to the _elements_ of the array, and so such modifications may be lost. 
+==== + +[[fetch-profiles]] +=== Named fetch profiles + +We've already seen two different ways to override the default <> for an association: + +- <>, and +- the `join fetch` clause in <>, or, equivalently, the method `From.fetch()` in the criteria query API. + +A third way is to define a named fetch profile. +First, we must declare the profile, by annotating a class or package: + +[source,java] +---- +@FetchProfile(name = "EagerBook") +@Entity +class Book { ... } +---- + +Note that even though we've placed this annotation on the `Book` entity, a fetch profile—unlike an entity graph—isn't "rooted" at any particular entity. + +We may specify association fetching strategies using the `fetchOverrides` member of the `@FetchProfile` annotation. + +[NOTE] +==== +Similarly, a JPA <> may be defined using `@NamedEntityGraph`. +But the format of this annotation is _even worse_ than `@FetchProfile(fetchOverrides=...)`, so we can't recommend it. 💀 +==== + +A better way is to annotate an association with the fetch profiles it should be fetched in: + +[source,java] +---- +@FetchProfile(name = "EagerBook", fetchOverrides = { + @FetchProfile.FetchOverride(entity = Book.class, association = "publisher", mode = JOIN), + @FetchProfile.FetchOverride(entity = Book.class, association = "authors", mode = JOIN), + @FetchProfile.FetchOverride(entity = Author.class, association = "person", mode = JOIN) +}) +@Entity +class Book { + ... + + @ManyToOne(fetch = LAZY) + Publisher publisher; + + @ManyToMany + Set authors; + + ... +} +---- +[source,java] +---- +@Entity +class Author { + ... + + @OneToOne + Person person; + + ... 
+} +---- + +For collections, we may even request subselect fetching: + +[source,java] +---- +@FetchProfile(name = "EagerBook", fetchOverrides = { + @FetchProfile.FetchOverride(entity = Book.class, association = "person", mode = JOIN), + @FetchProfile.FetchOverride(entity = Book.class, association = "authors", mode = JOIN) +}) +@FetchProfile(name = "BookWithAuthorsBySubselect", fetchOverrides = { + @FetchProfile.FetchOverride(entity = Book.class, association = "authors", mode = SUBSELECT) +}) +@Entity +class Book { + ... + + @OneToOne + Person person; + + @ManyToMany + Set authors; + + ... +} +---- + +We may define as many different fetch profiles as we like. + +.Annotations for defining fetch profiles +[%breakable,cols="25,~"] +|=== +| Annotation | Purpose + +| `@FetchProfile` | Declares a named fetch profile, optionally including a list of ``@FetchOverride``s +| `@FetchProfile.FetchOverride` | Declares a fetch strategy override as part of the `@FetchProfile` declaration +|=== + +A fetch profile must be explicitly enabled for a given session: + +[source,java] +---- +session.enableFetchProfile("EagerBook"); +Book eagerBook = session.find(Book.class, bookId); +---- + +So why or when might we prefer named fetch profiles to entity graphs? +Well, it's really hard to say. +It's nice that this feature _exists_, and if you love it, that's great. +But Hibernate offers alternatives that we think are more compelling most of the time. + +The one and only advantage unique to fetch profiles is that they let us very selectively request subselect fetching. +We can't do that with entity graphs, and we can't do it with HQL. 
diff --git a/documentation/src/main/asciidoc/introduction/Configuration.adoc b/documentation/src/main/asciidoc/introduction/Configuration.adoc new file mode 100644 index 000000000000..597fa4d682e3 --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Configuration.adoc @@ -0,0 +1,471 @@ +[[configuration]] +== Configuration and bootstrap + +We would love to make this section short. +Unfortunately, there's several distinct ways to configure and bootstrap Hibernate, and we're going to have to describe at least two of them in detail. + +The four basic ways to obtain an instance of Hibernate are shown in the following table: + +[%breakable,cols="50,50"] +|=== + +| Using the standard JPA-defined XML, and the operation `Persistence.createEntityManagerFactory()` +| Usually chosen when portability between JPA implementations is important. + +| Using the `Configuration` class to construct a `SessionFactory` +| When portability between JPA implementations is not important, this option is quicker, adds some flexibility and saves a typecast. + +| Using the more complex APIs defined in `org.hibernate.boot` +| Used primarily by framework integrators, this option is outside the scope of this document. + +| By letting the container take care of the bootstrap process and of injecting the `SessionFactory` or `EntityManagerFactory` +| Used in a container environment like WildFly or Quarkus. +|=== + +Here we'll focus on the first two options. + +:hibernate-quarkus: https://quarkus.io/guides/hibernate-orm + +.Hibernate in containers +**** +Actually, the last option is extremely popular, since every major Java application server and microservice framework comes with built-in support for Hibernate. +Such container environments typically also feature facilities to automatically manage the lifecycle of an `EntityManager` or `Session` and its association with container-managed transactions. 
+
+To learn how to configure Hibernate in such a container environment, you'll need to refer to the documentation of your chosen container.
+For Quarkus, here's the {hibernate-quarkus}[relevant documentation].
+****
+
+If you're using Hibernate outside of a container environment,
+you'll need to:
+
+- include Hibernate ORM itself, along with the appropriate JDBC driver, as dependencies of your project, and
+- configure Hibernate with information about your database,
+by specifying configuration properties.
+
+[[required-dependencies]]
+=== Including Hibernate in your project build
+
+First, add the following dependency to your project:
+
+----
+org.hibernate.orm:hibernate-core:{version}
+----
+
+Where `{version}` is the version of Hibernate you're using.
+
+You'll also need to add a dependency for the JDBC
+driver for your database.
+
+.JDBC driver dependencies
+[%breakable,cols="50,~"]
+|===
+| Database | Driver dependency
+
+| PostgreSQL or CockroachDB | `org.postgresql:postgresql:{version}`
+| MySQL or TiDB | `com.mysql:mysql-connector-j:{version}`
+| MariaDB | `org.mariadb.jdbc:mariadb-java-client:{version}`
+| DB2 | `com.ibm.db2:jcc:{version}`
+| SQL Server | `com.microsoft.sqlserver:mssql-jdbc:{version}`
+| Oracle | `com.oracle.database.jdbc:ojdbc11:{version}`
+| H2 | `com.h2database:h2:{version}`
+| HSQLDB | `org.hsqldb:hsqldb:{version}`
+|===
+
+Where `{version}` is the latest version of the JDBC driver for your database.
+ +[[optional-dependencies]] +=== Optional dependencies + +:slf4j: http://www.slf4j.org/ +:enhancer: {doc-user-guide-url}#tooling-gradle +:agroal: https://agroal.github.io +:jackson: https://github.com/FasterXML/jackson +:yasson: https://projects.eclipse.org/projects/ee4j.yasson +:validator: https://hibernate.org/validator +:ehcache: https://www.ehcache.org +:infinispan: https://infinispan.org +:generator: https://hibernate.org/orm/tooling/ +:caffeine: https://github.com/ben-manes/caffeine/ +:bean-validation: https://beanvalidation.org +:query-validator: https://github.com/hibernate/query-validator/ + +Optionally, you might also add any of the following additional features: + +.Optional dependencies +[%breakable,cols="50,~"] +|=== +| Optional feature | Dependencies + +| An {slf4j}[SLF4J] logging implementation | +`org.apache.logging.log4j:log4j-core` + +or `org.slf4j:slf4j-jdk14` +| A JDBC connection pool, for example, {agroal}[Agroal] | +`org.hibernate.orm:hibernate-agroal` + +and `io.agroal:agroal-pool` +| The {generator}[Hibernate Metamodel Generator], especially if you're using the JPA criteria query API | `org.hibernate.orm:hibernate-jpamodelgen` +| The {query-validator}[Query Validator], for compile-time checking of HQL | `org.hibernate:query-validator` +| {validator}[Hibernate Validator], an implementation of {bean-validation}[Bean Validation] | +`org.hibernate.validator:hibernate-validator` + +and `org.glassfish:jakarta.el` +| Local second-level cache support via JCache and {ehcache}[EHCache] | `org.hibernate.orm:hibernate-jcache` + +and `org.ehcache:ehcache` +| Local second-level cache support via JCache and {caffeine}[Caffeine]| `org.hibernate.orm:hibernate-jcache` + +and `com.github.ben-manes.caffeine:jcache` +| Distributed second-level cache support via {infinispan}[Infinispan] | `org.infinispan:infinispan-hibernate-cache-v60` +// | SCRAM authentication support for PostgreSQL | `com.ongres.scram:client:2.1` +| A JSON serialization library for working 
with JSON datatypes, for example, {jackson}[Jackson] or {yasson}[Yasson] | +`com.fasterxml.jackson.core:jackson-databind` + +or `org.eclipse:yasson` +| <> | `org.hibernate.orm:hibernate-spatial` +| <>, for auditing historical data | `org.hibernate.orm:hibernate-envers` +|=== + +You might also add the Hibernate {enhancer}[bytecode enhancer] to your +Gradle build if you want to use <>. + +[[configuration-jpa]] +=== Configuration using JPA XML + +Sticking to the JPA-standard approach, we would provide a file named `persistence.xml`, which we usually place in the `META-INF` directory of a _persistence archive_, that is, of the `.jar` file or directory which contains our entity classes. + +[source,xml] +---- + + + + + org.hibernate.example.Book + org.hibernate.example.Author + + + + + + + + + + + + + + + + + + + + + + +---- +The `` element defines a named _persistence unit_, that is: + +- a collection of associated entity types, along with +- a set of default configuration settings, which may be augmented or overridden at runtime. + +Each `` element specifies the fully-qualified name of an entity class. + +.Scanning for entity classes +**** +In some container environments, for example, in any EE container, the `` elements are unnecessary, since the container will scan the archive for annotated classes, and automatically recognize any class annotated `@Entity`. +**** + +Each `` element specifies a _configuration property_ and its value. +Note that: + +- the configuration properties in the `jakarta.persistence` namespace are standard properties defined by the JPA spec, and +- properties in the `hibernate` namespace are specific to Hibernate. 
+ +We may obtain an `EntityManagerFactory` by calling `Persistence.createEntityManagerFactory()`: + +[source,java] +---- +EntityManagerFactory entityManagerFactory = + Persistence.createEntityManagerFactory("org.hibernate.example"); +---- + +If necessary, we may override configuration properties specified in `persistence.xml`: + +[source,java] +---- +EntityManagerFactory entityManagerFactory = + Persistence.createEntityManagerFactory("org.hibernate.example", + Map.of(AvailableSettings.JAKARTA_JDBC_PASSWORD, password)); +---- + +[[configuration-api]] +=== Configuration using Hibernate API + +Alternatively, the venerable class `org.hibernate.cfg.Configuration` allows an instance of Hibernate to be configured in Java code: + +[source,java] +---- +SessionFactory sessionFactory = + new Configuration() + .addAnnotatedClass(Book.class) + .addAnnotatedClass(Author.class) + // PostgreSQL + .setProperty(AvailableSettings.JAKARTA_JDBC_URL, "jdbc:postgresql://localhost/example") + // Credentials + .setProperty(AvailableSettings.JAKARTA_JDBC_USER, user) + .setProperty(AvailableSettings.JAKARTA_JDBC_PASSWORD, password) + // Automatic schema export + .setProperty(AvailableSettings.JAKARTA_HBM2DDL_DATABASE_ACTION, + Action.SPEC_ACTION_DROP_AND_CREATE) + // SQL statement logging + .setProperty(AvailableSettings.SHOW_SQL, TRUE.toString()) + .setProperty(AvailableSettings.FORMAT_SQL, TRUE.toString()) + .setProperty(AvailableSettings.HIGHLIGHT_SQL, TRUE.toString()) + // Create a new SessionFactory + .buildSessionFactory(); +---- + +The `Configuration` class has survived almost unchanged since the very earliest (pre-1.0) versions of Hibernate, and so it doesn't look particularly modern. +On the other hand, it's very easy to use, and exposes some options that `persistence.xml` doesn't support. 
+ +:native-bootstrap: {doc-user-guide-url}#bootstrap-native +:boot: {versionDocBase}/javadocs/org/hibernate/boot/package-summary.html + +.Advanced configuration options +**** +Actually, the `Configuration` class is just a very simple facade for the more modern, much more powerful—but more complex—API defined in the package `org.hibernate.boot`. +This API is useful if you have very advanced requirements, for example, if you're writing a framework or implementing a container. +You'll find more information in the {native-bootstrap}[User Guide], and in the {boot}[package-level documentation] of `org.hibernate.boot`. +**** + +[[configuration-properties]] +=== Configuration using Hibernate properties file + +If we're using the Hibernate `Configuration` API, but we don't want to put certain configuration properties directly in the Java code, we can specify them in a file named `hibernate.properties`, and place the file in the root classpath. + +[source,properties] +---- +# PostgreSQL +jakarta.persistence.jdbc.url=jdbc:postgresql://localhost/example +# Credentials +jakarta.persistence.jdbc.user=hibernate +jakarta.persistence.jdbc.password=zAh7mY$2MNshzAQ5 + +# SQL statement logging +hibernate.show_sql=true +hibernate.format_sql=true +hibernate.highlight_sql=true +---- + +[[basic-configuration-settings]] +=== Basic configuration settings + +The class `org.hibernate.cfg.AvailableSettings` enumerates all the configuration properties understood by Hibernate. + +Of course, we're not going to cover every useful configuration setting in this chapter. +Instead, we'll mention the ones you need to get started, and come back to some other important settings later, especially when we talk about performance tuning. + +[TIP] +==== +Hibernate has many—too many—switches and toggles. +Please don't go crazy messing about with these settings; most of them are rarely needed, and many only exist to provide backward compatibility with older versions of Hibernate. 
+With rare exception, the default behavior of every one of these settings was carefully chosen to be _the behavior we recommend_. +==== + +The properties you really do need to get started are these three: + +.JDBC connection settings +[%breakable,cols="35,~"] +|=== +| Configuration property name | Purpose + +| `jakarta.persistence.jdbc.url` | JDBC URL of your database +| `jakarta.persistence.jdbc.user` and `jakarta.persistence.jdbc.password` | Your database credentials +|=== + +[IMPORTANT] +// .You don't need `hibernate.dialect` anymore! +==== +In Hibernate 6, you don't need to specify `hibernate.dialect`. +The correct Hibernate SQL `Dialect` will be determined for you automatically. +The only reason to specify this property is if you're using a custom user-written `Dialect` class. + +Similarly, neither `hibernate.connection.driver_class` nor `jakarta.persistence.jdbc.driver` is needed when working with one of the supported databases. +==== + +Pooling JDBC connections is an extremely important performance optimization. +You can set the size of Hibernate's built-in connection pool using this property: + +.Built-in connection pool size +[%breakable,cols="35,~"] +|=== +| Configuration property name | Purpose + +| `hibernate.connection.pool_size` | The size of the built-in connection pool +|=== + +[CAUTION] +// .The default connection pool is not meant for production use +==== +By default, Hibernate uses a simplistic built-in connection pool. +This pool is not meant for use in production, and later, when we discuss performance, we'll see how to <>. +==== + +Alternatively, in a container environment, you'll need at least one of these properties: + +.Transaction management settings +[%breakable,cols="35,~"] +|=== +| Configuration property name | Purpose + +| `jakarta.persistence.transactionType` | (Optional, defaults to `JTA`) + Determines if transaction management is via JTA or resource-local transactions. + Specify `RESOURCE_LOCAL` if JTA should not be used. 
+
+| `jakarta.persistence.jtaDataSource` | JNDI name of a JTA datasource
+| `jakarta.persistence.nonJtaDataSource` | JNDI name of a non-JTA datasource
+|===
+
+In this case, Hibernate obtains pooled JDBC database connections from a container-managed `DataSource`.
+
+[[automatic-schema-export]]
+=== Automatic schema export
+
+You can have Hibernate infer your database schema from the mapping
+annotations you've specified in your Java code, and export the schema at
+initialization time by specifying one or more of the following configuration
+properties:
+
+.Schema management settings
+[%breakable,cols="52,~"]
+|===
+| Configuration property name | Purpose
+
+| `jakarta.persistence.schema-generation.database.action`
+a| * If `drop-and-create`, first drop the schema and then export tables, sequences, and constraints
+* If `create`, export tables, sequences, and constraints, without attempting to drop them first
+* If `create-drop`, drop the schema and recreate it on `SessionFactory` startup
+Additionally, drop the schema on `SessionFactory` shutdown
+* If `drop`, drop the schema on `SessionFactory` shutdown
+* If `validate`, validate the database schema without changing it
+* If `update`, only export what's missing in the schema
+
+| `jakarta.persistence.create-database-schemas`
+| (Optional) If `true`, automatically create schemas and catalogs
+
+| `jakarta.persistence.schema-generation.create-source`
+| (Optional) If `metadata-then-script` or `script-then-metadata`, execute an additional SQL script when exporting tables and sequences
+
+| `jakarta.persistence.schema-generation.create-script-source`
+| (Optional) The name of the SQL script to be executed
+|===
+
+This feature is extremely useful for testing.
+
+[TIP]
+// .Importing test or reference data
+====
+The easiest way to pre-initialize a database with test or "reference" data is to place a list of SQL `insert` statements in a file named, for example, `import.sql`, and specify the path to this file using the property `jakarta.persistence.schema-generation.create-script-source`.
+
+This approach is cleaner than writing Java code to instantiate entity instances and calling `persist()` on each of them.
+====
+
+[TIP]
+// .Programmatic schema export
+====
+Alternatively, the `SchemaManager` API allows you to control schema export programmatically.
+
+[source,java]
+sessionFactory.getSchemaManager().exportMappedObjects(true);
+====
+
+[[logging-generated-sql]]
+=== Logging the generated SQL
+
+:log4j: https://github.com/hibernate/hibernate-reactive/blob/main/examples/session-example/src/main/resources/log4j2.properties
+
+To see the generated SQL as it's sent to the database, you have two options.
+
+One way is to set the property `hibernate.show_sql` to `true`, and Hibernate will log SQL directly to the console.
+You can make the output much more readable by enabling formatting or highlighting.
+These settings really help when troubleshooting the generated SQL statements.
+
+.Settings for SQL logging to the console
+[%breakable,cols="35,~"]
+|===
+| Configuration property name | Purpose
+
+| `hibernate.show_sql` | If `true`, log SQL directly to the console
+| `hibernate.format_sql` | If `true`, log SQL in a multiline, indented format
+| `hibernate.highlight_sql` | If `true`, log SQL with syntax highlighting via ANSI escape codes
+|===
+
+Alternatively, you can enable debug-level logging for the category `org.hibernate.SQL` using your preferred SLF4J logging implementation.
+ +For example, if you're using Log4J 2 (as above in <>), add these lines to your `log4j2.properties` file: + +[source,properties] +---- +# SQL execution +logger.hibernate.name = org.hibernate.SQL +logger.hibernate.level = debug + +# JDBC parameter binding +logger.jdbc-bind.name=org.hibernate.orm.jdbc.bind +logger.jdbc-bind.level=trace +# JDBC result set extraction +logger.jdbc-extract.name=org.hibernate.orm.jdbc.extract +logger.jdbc-extract.level=trace + +---- + +But with this approach we miss out on the pretty highlighting. + +[[minimizing]] +=== Minimizing repetitive mapping information + +The following properties are very useful for minimizing the amount of information you'll need to explicitly specify in `@Table` and `@Column` annotations, which we'll discuss below in <>: + +.Settings for minimizing explicit mapping information +[%breakable,cols="35,~"] +|=== +| Configuration property name | Purpose + +| `hibernate.default_schema` | A default schema name for entities which do not explicitly declare one +| `hibernate.default_catalog` | A default catalog name for entities which do not explicitly declare one +| `hibernate.physical_naming_strategy` | A `PhysicalNamingStrategy` implementing your database naming standards +| `hibernate.implicit_naming_strategy` | An `ImplicitNamingStrategy` which specifies how "logical" names of relational objects should be inferred when no name is specified in annotations +|=== + +[TIP] +// .Implement your naming standards as a `PhysicalNamingStrategy` +==== +Writing your own `PhysicalNamingStrategy` and/or `ImplicitNamingStrategy` is an especially good way to reduce the clutter of annotations on your entity classes, and to implement your database naming conventions, and so we think you should do it for any nontrivial data model. +We'll have more to say about them in <>. 
+==== + +[[nationalized-chars]] +=== Nationalized character data in SQL Server + +_By default,_ SQL Server's `char` and `varchar` types don't accommodate Unicode data. +But a Java string may contain any Unicode character. +So, if you're working with SQL Server, you might need to force Hibernate to use the `nchar` and `nvarchar` column types. + +.Setting the use of nationalized character data +[%breakable,cols="40,~"] +|=== +| Configuration property name | Purpose + +| `hibernate.use_nationalized_character_data` | Use `nchar` and `nvarchar` instead of `char` and `varchar` +|=== + +On the other hand, if only _some_ columns store nationalized data, use the `@Nationalized` annotation to indicate fields of your entities which map these columns. + +[TIP] +// .Configuring SQL Server to use UTF-8 by default +==== +Alternatively, you can configure SQL Server to use the UTF-8 enabled collation `_UTF8`. +==== + diff --git a/documentation/src/main/asciidoc/introduction/Entities.adoc b/documentation/src/main/asciidoc/introduction/Entities.adoc new file mode 100644 index 000000000000..32e67474d65b --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Entities.adoc @@ -0,0 +1,1590 @@ +[[entities]] +== Entities + +An _entity_ is a Java class which represents data in a relational database table. +We say that the entity _maps_ or _maps to_ the table. +Much less commonly, an entity might aggregate data from multiple tables, but we'll get to that <>. + +An entity has _attributes_—properties or fields—which map to columns of the table. +In particular, every entity must have an _identifier_ or _id_, which maps to the primary key of the table. +The id allows us to uniquely associate a row of the table with an instance of the Java class, at least within a given _persistence context_. + +We'll explore the idea of a persistence context <>. For now, think of it as a one-to-one mapping between ids and entity instances. 
+ +An instance of a Java class cannot outlive the virtual machine to which it belongs. +But we may think of an entity instance having a lifecycle which transcends a particular instantiation in memory. +By providing its id to Hibernate, we may re-materialize the instance in a new persistence context, as long as the associated row is present in the database. +Therefore, the operations `persist()` and `remove()` may be thought of as demarcating the beginning and end of the lifecycle of an entity, at least with respect to persistence. + +Thus, an id represents the _persistent identity_ of an entity, an identity that outlives a particular instantiation in memory. +And this is an important difference between entity class itself and the values of its attributes—the entity has a persistent identity, and a well-defined lifecycle with respect to persistence, whereas a `String` or `List` representing one of its attribute values doesn't. + +An entity usually has associations to other entities. +Typically, an association between two entities maps to a foreign key in one of the database tables. +A group of mutually associated entities is often called a _domain model_, though _data model_ is also a perfectly good term. + +[[entity-clases]] +=== Entity classes + +An entity must: + +- be a non-`final` class, +- with a non-`private` constructor with no parameters. + +On the other hand, the entity class may be either concrete or `abstract`, and it may have any number of additional constructors. + +[TIP] +// .Inner entity classes +==== +An entity class may be a `static` inner class. +==== + +Every entity class must be annotated `@Entity`. + +[source,java] +---- +@Entity +class Book { + Book() {} + ... +} +---- + +Alternatively, the class may be identified as an entity type by providing an XML-based mapping for the class. 
+ +.Mapping entities using XML +**** +When XML-based mappings are used, the `` element is used to declare an entity class: + +[source,xml] +---- + + org.hibernate.example + + + ... + + + ... + +---- +Since the `orm.xml` mapping file format defined by the JPA specification was modelled closely on the annotation-based mappings, it's usually easy to go back and forth between the two options. +**** + +We won't have much more to say about XML-based mappings in this Introduction, since it's not our preferred way to do things. + +."Dynamic" models +**** +:maps: {doc-user-guide-url}#dynamic-model +:envers: https://hibernate.org/orm/envers/ +We love representing entities as classes because the classes give us a _type-safe_ model of our data. +But Hibernate also has the ability to represent entities as detyped instances of `java.util.Map`. +There's information in the {maps}[User Guide], if you're curious. + +This must sound like a weird feature for a project that places importance on type-safety. +Actually, it's a useful capability for a very particular sort of generic code. +For example, {envers}[Hibernate Envers] is a great auditing/versioning system for Hibernate entities. +Envers makes use of maps to represent its _versioned model_ of the data. +**** + +[[access-type]] +=== Access types + +Each entity class has a default _access type_, either: + +- direct _field access_, or +- _property access_. + +Hibernate automatically determines the access type from the location of attribute-level annotations. +Concretely: + +- if a field is annotated `@Id`, field access is used, or +- if a getter method is annotated `@Id`, property access is used. + +Back when Hibernate was just a baby, property access was quite popular in the Hibernate community. +Today, however, field access is _much_ more common. 
+ +[NOTE] +// .Explicit access type +==== +The default access type may be specified explicitly using the `@Access` annotation, but we strongly discourage this, since it's ugly and never necessary. +==== + +[IMPORTANT] +// .Mapping annotations should be placed consistently +==== +Mapping annotations should be placed consistently: + +- if `@Id` annotates a field, the other mapping annotations should also be applied to fields, or, +- if `@Id` annotates a getter, the other mapping annotations should be applied to getters. + +It is in principle possible to mix field and property access using explicit `@Access` annotations at the attribute level. +We don't recommend doing this. +==== + +An entity class like `Book`, which does not extend any other entity class is called a _root entity_. +Every root entity must declare an identifier attribute. + +[[entity-inheritance]] +=== Entity class inheritance + +An entity class may `extend` another entity class. + +[source,java] +---- +@Entity +class AudioBook extends Book { + AudioBook() {} + ... +} +---- + +A subclass entity inherits every persistent attribute of every entity it extends. + +A root entity may also extend another class and inherit mapped attributes from the other class. +But in this case, the class which declares the mapped attributes must be annotated `@MappedSuperclass`. + +[source,java] +---- +@MappedSuperclass +class Versioned { + ... +} + +@Entity +class Book extends Versioned { + ... +} +---- + +A root entity class must declare an attribute annotated `@Id`, or inherit one from a `@MappedSuperclass`. +A subclass entity always inherits the identifier attribute of the root entity. +It may not declare its own `@Id` attribute. + +[[identifier-attributes]] +=== Identifier attributes + +An identifier attribute is usually a field: + +[source,java] +---- +@Entity +class Book { + Book() {} + + @Id + Long id; + + ... 
+} +---- + +But it may be a property: + +[source,java] +---- +@Entity +class Book { + Book() {} + + private Long id; + + @Id + Long getId() { return id; } + void setId(Long id) { this.id = id; } + + ... +} +---- + +An identifier attribute must be annotated `@Id` or `@EmbeddedId`. + +Identifier values may be: + +- assigned by the application, that is, by your Java code, or +- generated and assigned by Hibernate. + +We'll discuss the second option first. + +[[generated-identifiers]] +=== Generated identifiers + +An identifier is often system-generated, in which case it should be annotated `@GeneratedValue`: + +[source,java] +---- +@Id @GeneratedValue +Long id; +---- + +[TIP] +// .Using surrogate keys +==== +System-generated identifiers, or _surrogate keys_ make it easier to evolve or refactor the relational data model. +If you have the freedom to define the relational schema, we recommend the use of surrogate keys. +On the other hand, if, as is more common, you're working with a pre-existing database schema, you might not have the option. 
+==== + +JPA defines the following strategies for generating ids, which are enumerated by `GenerationType`: + +.Standard id generation strategies +[%breakable,cols="25,15,~"] +|=== +| Strategy | Java type | Implementation + +| `GenerationType.UUID` | `UUID` or `String` | A Java `UUID` +| `GenerationType.IDENTITY` | `Long` or `Integer` | An identity or autoincrement column +| `GenerationType.SEQUENCE` | `Long` or `Integer` | A database sequence +| `GenerationType.TABLE` | `Long` or `Integer` | A database table +| `GenerationType.AUTO` | `Long` or `Integer` | Selects `SEQUENCE`, `TABLE`, or `UUID` based on the identifier type and capabilities of the database +|=== + +For example, this UUID is generated in Java code: + +[source,java] +---- +@Id @GeneratedValue UUID id; // AUTO strategy selects UUID based on the field type +---- + +This id maps to a SQL `identity`, `auto_increment`, or `bigserial` column: + +[source,java] +---- +@Id @GeneratedValue(strategy=IDENTITY) Long id; +---- + +The `@SequenceGenerator` and `@TableGenerator` annotations allow further control over `SEQUENCE` and `TABLE` generation respectively. + +Consider this sequence generator: + +[source,java] +---- +@SequenceGenerator(name="bookSeq", sequenceName="seq_book", initialValue = 5, allocationSize=10) +---- + +Values are generated using a database sequence defined as follows: + +[source,sql] +---- +create sequence seq_book start with 5 increment by 10 +---- + +Notice that Hibernate doesn't have to go to the database every time a new identifier is needed. +Instead, a given process obtains a block of ids, of size `allocationSize`, and only needs to hit the database each time the block is exhausted. +Of course, the downside is that generated identifiers are not contiguous. + +[CAUTION] +// .Check the `initialValue` and `allocationSize` +==== +If you let Hibernate export your database schema, the sequence definition will have the right `start with` and `increment` values. 
+But if you're working with a database schema managed outside Hibernate, make sure the `initialValue` and `allocationSize` members of `@SequenceGenerator` match the `start with` and `increment` specified in the DDL. +==== + +Any identifier attribute may now make use of the generator named `bookSeq`: + +[source,java] +---- +@Id +@GeneratedValue(strategy=SEQUENCE, generator="bookSeq") // reference to generator defined elsewhere +Long id; +---- + +Actually, it's extremely common to place the `@SequenceGenerator` annotation on the `@Id` attribute that makes use of it: + +[source,java] +---- +@Id +@GeneratedValue(strategy=SEQUENCE, generator="bookSeq") // reference to generator defined below +@SequenceGenerator(name="bookSeq", sequenceName="seq_book", initialValue = 5, allocationSize=10) +Long id; +---- + +[NOTE] +// .JPA id generators may be shared between entities +==== +JPA id generators may be shared between entities. +A `@SequenceGenerator` or `@TableGenerator` must have a name, and may be shared between multiple id attributes. +This fits somewhat uncomfortably with the common practice of annotating the `@Id` attribute which makes use of the generator! +==== + +As you can see, JPA provides quite adequate support for the most common strategies for system-generated ids. +However, the annotations themselves are a bit more intrusive than they should be, and there's no well-defined way to extend this framework to support custom strategies for id generation. +Nor may `@GeneratedValue` be used on a property not annotated `@Id`. +Since custom id generation is a rather common requirement, Hibernate provides a very carefully-designed framework for user-defined ``Generator``s, which we'll discuss in <>. + +[[natural-identifiers]] +=== Natural keys as identifiers + +Not every identifier attribute maps to a (system-generated) surrogate key. +Primary keys which are meaningful to the user of the system are called _natural keys_. 
+ +When the primary key of a table is a natural key, we don't annotate the identifier attribute `@GeneratedValue`, and it's the responsibility of the application code to assign a value to the identifier attribute. + +[source,java] +---- +@Entity +class Book { + @Id + String isbn; + + ... +} +---- + +Of particular interest are natural keys which comprise more than one database column, and such natural keys are called _composite keys_. + +[[composite-identifiers]] +=== Composite identifiers + +If your database uses composite keys, you'll need more than one identifier attribute. +There are two ways to map composite keys in JPA: + +- using an `@IdClass`, or +- using an `@EmbeddedId`. + +Perhaps the most immediately-natural way to represent this in an entity class is with multiple fields annotated `@Id`, for example: + +[source,java] +---- +@Entity +@IdClass(BookId.class) +class Book { + Book() {} + + @Id + String isbn; + + @Id + int printing; + + ... +} +---- + +But this approach comes with a problem: what object can we use to identify a `Book` and pass to methods like `find()` which accept an identifier? + +The solution is to write a separate class with fields that match the identifier attributes of the entity. +The `@IdClass` annotation of the `Book` entity identifies the id class to use for that entity: + +[source,java] +---- +class BookId { + + String isbn; + int printing; + + BookId() {} + + BookId(String isbn, int printing) { + this.isbn = isbn; + this.printing = printing; + } + + @Override + public boolean equals(Object other) { + if (other instanceof BookId) { + BookId bookId = (BookId) other; + return bookId.isbn.equals(isbn) + && bookId.printing == printing; + } + else { + return false; + } + } + + @Override + public int hashCode() { + return isbn.hashCode(); + } +} +---- + +Every id class should override `equals()` and `hashCode()`. + +This is not our preferred approach. 
+Instead, we recommend that the `BookId` class be declared as an `@Embeddable` type: + +[source,java] +---- +@Embeddable +class BookId { + + String isbn; + + int printing; + + BookId() {} + + BookId(String isbn, int printing) { + this.isbn = isbn; + this.printing = printing; + } + + ... +} +---- + +We'll learn more about <> below. + +Now the entity class may reuse this definition using `@EmbeddedId`, and the `@IdClass` annotation is no longer required: + +[source,java] +---- +@Entity +class Book { + Book() {} + + @EmbeddedId + BookId bookId; + + ... +} +---- + +This second approach eliminates some duplicated code. + +Either way, we may now use `BookId` to obtain instances of `Book`: + +[source,java] +---- +Book book = session.find(Book.class, new BookId(isbn, printing)); +---- + +[[version-attributes]] +=== Version attributes + +An entity may have an attribute which is used by Hibernate for optimistic lock checking. +A version attribute is usually of type `Integer`, `Short`, `Long`, `LocalDateTime`, `OffsetDateTime`, `ZonedDateTime`, or `Instant`. + +[source,java] +---- +@Version +LocalDateTime lastUpdated; +---- + +Version attributes are automatically assigned by Hibernate when an entity is made persistent, and automatically incremented or updated each time the entity is updated. + +[TIP] +// .Optimistic locking in Hibernate +==== +If an entity doesn't have a version number, which often happens when mapping legacy data, we can still do optimistic locking. +The `@OptimisticLocking` annotation lets us specify that optimistic locks should be checked by validating the values of `ALL` fields, or only the `DIRTY` fields of the entity. +And the `@OptimisticLock` annotation lets us selectively exclude certain fields from optimistic locking. +==== + +The `@Id` and `@Version` attributes we've already seen are just specialized examples of _basic attributes_. 
+ +[[natural-id-attributes]] +=== Natural id attributes + +Even when an entity has a surrogate key, it should always be possible to write down a combination of fields which uniquely identifies an instance of the entity, from the point of view of the user of the system. +This combination of fields is its natural key. +Above, we <> the case where the natural key coincides with the primary key. +Here, the natural key is a second unique key of the entity, distinct from its surrogate primary key. + +[IMPORTANT] +// .What if my entity has no natural key? +==== +If you can't identify a natural key, it might be a sign that you need to think more carefully about some aspect of your data model. +If an entity doesn't have a meaningful unique key, then it's impossible to say what event or object it represents in the "real world" outside your program. +==== + +Since it's _extremely_ common to retrieve an entity based on its natural key, Hibernate has a way to mark the attributes of the entity which make up its natural key. +Each attribute must be annotated `@NaturalId`. + +[source,java] +---- +@Entity +class Book { + Book() {} + + @Id @GeneratedValue + Long id; // the system-generated surrogate key + + @NaturalId + String isbn; // belongs to the natural key + + @NaturalId + int printing; // also belongs to the natural key + + ... +} +---- + +Hibernate automatically generates a `UNIQUE` constraint on the columns mapped by the annotated fields. + +[TIP] +==== +Consider using the natural id attributes to implement <>. +==== + +The payoff for doing this extra work, as we will see <>, is that we can take advantage of optimized natural id lookups that make use of the second-level cache. + +Note that even when you've identified a natural key, we still recommend the use of a generated surrogate key in foreign keys, since this makes your data model _much_ easier to change. 
+ +[[basic-attributes]] +=== Basic attributes + +A _basic_ attribute of an entity is a field or property which maps to a single column of the associated database table. +The JPA specification defines a quite limited set of basic types: + +.JPA-standard basic attribute types +[%breakable,cols="30,^14,~"] +|==== +| Classification | Package | Types + +| Primitive types | | `boolean`, `int`, `double`, etc +| Primitive wrappers | `java.lang` | `Boolean`, `Integer`, `Double`, etc +| Strings | `java.lang` | `String` +| Arbitrary-precision numeric types | `java.math` | `BigInteger`, `BigDecimal` +| Date/time types | `java.time` | `LocalDate`, `LocalTime`, `LocalDateTime`, `OffsetDateTime`, `Instant` +| Deprecated date/time types 💀 | `java.util` | `Date`, `Calendar` +| Deprecated JDBC date/time types 💀 | `java.sql` | `Date`, `Time`, `Timestamp` +| Binary and character arrays | | `byte[]`, `char[]` +| UUIDs | `java.util` | `UUID` +| Enumerated types | | Any `enum` +| Serializable types | | Any type which implements `java.io.Serializable` +|==== + +[IMPORTANT] +// .Please don't use `Date`! +==== +We're begging you to use types from the `java.time` package instead of anything which inherits `java.util.Date`. +==== + +[CAUTION] +// .Serialization is usually a bad idea +==== +Serializing a Java object and storing its binary representation in the database is usually wrong. +As we'll soon see in <>, Hibernate has much better ways to handle complex Java objects. 
+==== + +Hibernate slightly extends this list with the following types: + +.Additional basic attribute types in Hibernate +[%breakable,cols="30,^14,56"] +|==== +| Classification | Package | Types + +| Additional date/time types | `java.time` | `Duration`, `ZoneId`, `ZoneOffset`, `Year`, and even `ZonedDateTime` +| JDBC LOB types | `java.sql` | `Blob`, `Clob`, `NClob` +| Java class object | `java.lang` | `Class` +| Miscellaneous types | `java.util` | `Currency`, `URL`, `TimeZone` +|==== + +The `@Basic` annotation explicitly specifies that an attribute is basic, but it's often not needed, since attributes are assumed basic by default. +On the other hand, if a non-primitively-typed attribute cannot be null, use of `@Basic(optional=false)` is highly recommended. + +[source,java] +---- +@Basic(optional=false) String firstName; +@Basic(optional=false) String lastName; +String middleName; // may be null +---- + +Note that primitively-typed attributes are inferred `NOT NULL` by default. + +.How to make a column `not null` in JPA +**** +There are two standard ways to add a `NOT NULL` constraint to a mapped column in JPA: + +- using `@Basic(optional=false)`, or +- using `@Column(nullable=false)`. + +You might wonder what the difference is. + +Well, it's perhaps not obvious to a casual user of the JPA annotations, but they actually come in two "layers": + +- annotations like `@Entity`, `@Id`, and `@Basic` belong to the _logical_ layer, the subject of the current chapter—they specify the semantics of your Java domain model, whereas +- annotations like `@Table` and `@Column` belong to the _mapping_ layer, the topic of the <>—they specify how elements of the domain model map to objects in the relational database. + +Information may be inferred from the logical layer down to the mapping layer, but is never inferred in the opposite direction. 
+ +Now, the `@Column` annotation, to whom we'll be properly <> a bit later, belongs to the _mapping_ layer, and so its `nullable` member only affects schema generation (resulting in a `not null` constraint in the generated DDL). +On the other hand, the `@Basic` annotation belongs to the logical layer, and so an attribute marked `optional=false` is checked by Hibernate before it even writes an entity to the database. +Note that: + +- `optional=false` implies `nullable=false`, but +- `nullable=false` _does not_ imply `optional=false`. + +Therefore, we prefer `@Basic(optional=false)` to `@Column(nullable=false)`. + +[TIP] +==== +But wait! +An even better solution is to use the `@NotNull` annotation from Bean Validation. +Just add Hibernate Validator to your project build, as described in <>. +==== +**** + +[[enums]] +=== Enumerated types + +We included Java ``enum``s on the list above. +An enumerated type is considered a sort of basic type, but since most databases don't have a native `ENUM` type, JPA provides a special `@Enumerated` annotation to specify how the enumerated values should be represented in the database: + +- by default, an enum is stored as an integer, the value of its `ordinal()` member, but +- if the attribute is annotated `@Enumerated(STRING)`, it will be stored as a string, the value of its `name()` member. + +[source,java] +---- +//here, an ORDINAL encoding makes sense +@Enumerated +@Basic(optional=false) +DayOfWeek dayOfWeek; + +//but usually, a STRING encoding is better +@Enumerated(EnumType.STRING) +@Basic(optional=false) +Status status; + +---- + +In Hibernate 6, an `enum` annotated `@Enumerated(STRING)` is mapped to: + +- a `VARCHAR` column type with a `CHECK` constraint on most databases, or +- an `ENUM` column type on MySQL. + +Any other ``enum`` is mapped to a `TINYINT` column with a `CHECK` constraint. + +[TIP] +// .It's usually better to persist `enum` values by their names +==== +JPA picks the wrong default here. 
+In most cases, storing an integer encoding of the `enum` value makes the relational data harder to interpret. + +Even considering `DayOfWeek`, the encoding to integers is ambiguous. +If you check `java.time.DayOfWeek`, you'll notice that `SUNDAY` is encoded as `6`. +But in the country I was born, `SUNDAY` is the _first_ day of the week! + +So we prefer `@Enumerated(STRING)` for most `enum` attributes. +==== + +An interesting special case is PostgreSQL. +Postgres supports _named_ `ENUM` types, which must be declared using a DDL `CREATE TYPE` statement. +Sadly, these `ENUM` types aren't well-integrated with the language nor well-supported by the Postgres JDBC driver, so Hibernate doesn't use them by default. +But if you would like to use a named enumerated type on Postgres, just annotate your `enum` attribute like this: + +[source,java] +---- +@JdbcTypeCode(SqlTypes.NAMED_ENUM) +@Basic(optional=false) +Status status; +---- + +The limited set of pre-defined basic attribute types can be stretched a bit further by supplying a _converter_. + +[[converters]] +=== Converters + +A JPA `AttributeConverter` is responsible for: + +- converting a given Java type to one of the types listed above, and/or +- perform any other sort of pre- and post-processing you might need to perform on a basic attribute value before writing and reading it to or from the database. + +Converters substantially widen the set of attribute types that can be handled by JPA. + +There are two ways to apply a converter: + +- the `@Convert` annotation applies an `AttributeConverter` to a particular entity attribute, or +- the `@Converter` annotation (or, alternatively, the `@ConverterRegistration` annotation) registers an `AttributeConverter` for automatic application to all attributes of a given type. 
+
+For example, the following converter will be automatically applied to any attribute of type `EnumSet<DayOfWeek>`, and takes care of persisting the `EnumSet` to a column of type `integer`:
+
+[source,java]
+----
+@Converter(autoApply = true)
+public static class EnumSetConverter implements AttributeConverter<EnumSet<DayOfWeek>,Integer> {
+    @Override
+    public Integer convertToDatabaseColumn(EnumSet<DayOfWeek> enumSet) {
+        int encoded = 0;
+        var values = DayOfWeek.values();
+        for (int i = 0; i<values.length; i++) {
+            if (enumSet.contains(values[i])) {
+                encoded |= 1<<i;
+            }
+        }
+        return encoded;
+    }
+
+    @Override
+    public EnumSet<DayOfWeek> convertToEntityAttribute(Integer encoded) {
+        var set = EnumSet.noneOf(DayOfWeek.class);
+        var values = DayOfWeek.values();
+        for (int i = 0; i<values.length; i++) {
+            if ((encoded & 1<<i) != 0) {
+                set.add(values[i]);
+            }
+        }
+        return set;
+    }
+}
+----
+
+// NOTE(review): a passage was lost here during extraction — it introduced the
+// compositional `@JdbcType`/`@Convert` example below; restore it from the original source.
+
+[source,java]
+----
+@JdbcType(TimestampJdbcType.class)
+@Convert(converter = LongToTimestampConverter.class)
+long currentTimeMillis;
+----
+
+Let's abandon our analogy right here, before we start calling this basic type a "throuple".
+
+[[embeddable-objects]]
+=== Embeddable objects
+
+An embeddable object is a Java class whose state maps to multiple columns of a table, but which doesn't have its own persistent identity.
+That is, it's a class with mapped attributes, but no `@Id` attribute.
+
+An embeddable object can only be made persistent by assigning it to the attribute of an entity.
+Since the embeddable object does not have its own persistent identity, its lifecycle with respect to persistence is completely determined by the lifecycle of the entity to which it belongs.
+
+An embeddable class must be annotated `@Embeddable` instead of `@Entity`.
+
+[source,java]
+----
+@Embeddable
+class Name {
+
+    @Basic(optional=false)
+    String firstName;
+
+    @Basic(optional=false)
+    String lastName;
+
+    String middleName;
+
+    Name() {}
+
+    Name(String firstName, String middleName, String lastName) {
+        this.firstName = firstName;
+        this.middleName = middleName;
+        this.lastName = lastName;
+    }
+
+    ...
+}
+----
+
+An embeddable class must satisfy the same requirements that entity classes satisfy, with the exception that an embeddable class has no `@Id` attribute.
+In particular, it must have a constructor with no parameters.
+
+Alternatively, an embeddable type may be defined as a Java record type:
+
+[source,java]
+----
+@Embeddable
+record Name(String firstName, String middleName, String lastName) {}
+----
+
+In this case, the requirement for a constructor with no parameters is relaxed.
+
+[NOTE]
+====
+Unfortunately, as of May 2023, Java `record` types still cannot be used as ``@EmbeddedId``s.
+====
+
+We may now use our `Name` class (or record) as the type of an entity attribute:
+
+[source,java]
+----
+@Entity
+class Author {
+    @Id @GeneratedValue
+    Long id;
+
+    Name name;
+
+    ...
+}
+----
+
+Embeddable types can be nested.
+That is, an `@Embeddable` class may have an attribute whose type is itself a different `@Embeddable` class.
+
+[TIP]
+// .The `@Embedded` annotation is not required
+====
+JPA provides an `@Embedded` annotation to identify an attribute of an entity that refers to an embeddable type.
+This annotation is completely optional, and so we don't usually use it.
+====
+
+On the other hand, a reference to an embeddable type is _never_ polymorphic.
+One `@Embeddable` class `F` may inherit a second `@Embeddable` class `E`, but an attribute of type `E` will always refer to an instance of that concrete class `E`, never to an instance of `F`.
+
+Usually, embeddable types are stored in a "flattened" format.
+Their attributes map columns of the table of their parent entity.
+Later, in <>, we'll see a couple of different options.
+
+An attribute of embeddable type represents a relationship between a Java object with a persistent identity, and a Java object with no persistent identity.
+We can think of it as a whole/part relationship.
+The embeddable object belongs to the entity, and can't be shared with other entity instances.
+And it exists for only as long as its parent entity exists.
+ +Next we'll discuss a different kind of relationship: a relationship between Java objects that each have their persistent identity and persistence lifecycle. + +[[associations]] +=== Associations + +An _association_ is a relationship between entities. +We usually classify associations based on their _multiplicity_. +If `E` and `F` are both entity classes, then: + +- a _one-to-one_ association relates at most one unique instance `E` with at most one unique instance of `F`, +- a _many-to-one_ association relates zero or more instances of `E` with a unique instance of `F`, and +- a _many-to-many_ association relates zero or more instances of `E` with zero or more instance of `F`. + +An association between entity classes may be either: + +- _unidirectional_, navigable from `E` to `F` but not from `F` to `E`, or +- _bidirectional_, and navigable in either direction. + +In this example data model, we can see the sorts of associations which are possible: + +image::images/associations.png[Example data model,align="center",pdfwidth=90%] + +[%unbreakable] +[TIP] +// .One-to-one associations and subtyping +==== +An astute observer of the diagram above might notice that the relationship we've presented as a unidirectional one-to-one association could reasonably be represented in Java using subtyping. +This is quite normal. +A one-to-one association is the usual way we implement subtyping in a fully-normalized relational model. +It's related to the `JOINED` <> strategy. +==== + +There are three annotations for mapping associations: `@ManyToOne`, `@OneToMany`, and `@ManyToMany`. 
+They share some common annotation members: + +.Association-defining annotation members +[%breakable,cols="13,~,35"] +|=== +| Member | Interpretation | Default value + +| `cascade` | Persistence operations which should <> to the associated entity; a list of ``CascadeType``s | `{}` +| `fetch` | Whether the association is <> <> or may be <> +a| +- `LAZY` for `@OneToMany` and `@ManyToMany` +- `EAGER` for `@ManyToOne` 💀💀💀 +| `targetEntity` | The associated entity class | Determined from the attribute type declaration +| `optional` | For a `@ManyToOne` or `@OneToOne` association, whether the association can be `null` | `true` +| `mappedBy` | For a bidirectional association, an attribute of the associated entity which maps the association | By default, the association is assumed unidirectional +|=== + +We'll explain the effect of these members as we consider the various types of association mapping. + +Let's begin with the most common association multiplicity. + +[[many-to-one]] +=== Many-to-one + +A many-to-one association is the most basic sort of association we can imagine. +It maps completely naturally to a foreign key in the database. +Almost all the associations in your domain model are going to be of this form. + +[TIP] +// .One-to-many join table mappings +==== +Later, we'll see how to map a many-to-one association to an <>. +==== + +The `@ManyToOne` annotation marks the "to one" side of the association, so a unidirectional many-to-one association looks like this: + +[source,java] +---- +class Book { + @Id @GeneratedValue + Long id; + + @ManyToOne(fetch=LAZY) + Publisher publisher; + + ... +} +---- + +Here, the `Book` table has a foreign key column holding the identifier of the associated `Publisher`. + +[[lazy-problem]] +[TIP] +// .Almost all associations should be lazy +==== +A very unfortunate misfeature of JPA is that `@ManyToOne` associations are fetched eagerly by default. +This is almost never what we want. +Almost all associations should be lazy. 
+The only scenario in which `fetch=EAGER` makes sense is if we think there's always a _very_ high probability that the <>. +Whenever this isn't the case, remember to explicitly specify `fetch=LAZY`. +==== + +Most of the time, we would like to be able to easily navigate our associations in both directions. +We do need a way to get the `Publisher` of a given `Book`, but we would also like to be able to obtain all the ``Book``s belonging to a given publisher. + +To make this association bidirectional, we need to add a collection-valued attribute to the `Publisher` class, and annotate it `@OneToMany`. + +[NOTE] +==== +Hibernate needs to <> unfetched associations at runtime. +Therefore, the many-valued side must be declared using an interface type like `Set` or `List`, and never using a concrete type like `HashSet` or `ArrayList`. +==== + +To indicate clearly that this is a bidirectional association, and to reuse any mapping information already specified in the `Book` entity, we must use the `mappedBy` annotation member to refer back to `Book.publisher`. + +[source,java] +---- +@Entity +class Publisher { + @Id @GeneratedValue + Long id; + + @OneToMany(mappedBy="publisher") + Set books; + + ... +} +---- + +The `Publisher.books` field is called the _unowned_ side of the association. + +Now, we passionately _hate_ the stringly-typed `mappedBy` reference to the owning side of the association. +Thankfully, the <> gives us a way to make it a +bit more typesafe: +[source,java] +---- +@OneToMany(mappedBy=Book_.PUBLISHER) // get used to doing it this way! +Set books; +---- +We're going to use this approach for the rest of the Introduction. + +To modify a bidirectional association, we must change the _owning side_. + +[[bidirectional-problem]] +[WARNING] +// .To modify a bidirectional association, you must change the _owning side_! +==== +Changes made to the unowned side of an association are never synchronized to the database. 
+If we desire to change an association in the database, we must change it from the owning side. +Here, we must set `Book.publisher`. + +In fact, it's often necessary to change _both sides_ of a bidirectional association. +For example, if the collection `Publisher.books` was stored in the second-level cache, we must also modify the collection, to ensure that the second-level cache remains synchronized with the database. +==== + +That said, it's _not_ a hard requirement to update the unowned side, at least if you're sure you know what you're doing. + +[TIP] +// .Unidirectional `@OneToMany`? +==== +In principle Hibernate _does_ allow you to have a unidirectional one-to-many, that is, a `@OneToMany` with no matching `@ManyToOne` on the other side. +In practice, this mapping is unnatural, and just doesn't work very well. +Avoid it. +==== + +Here we've used `Set` as the type of the collection, but Hibernate also allows the use of `List` or `Collection` here, with almost no difference in semantics. +In particular, the `List` may not contain duplicate elements, and its order will not be persistent. + +[source,java] +---- +@OneToMany(mappedBy=Book_.PUBLISHER) +Collection books; +---- + +We'll see how to map a collection with a persistent order <>. + +[[set-vs-list]] +.`Set`, `List`, or `Collection`? +**** +A one-to-many association mapped to a foreign key can never contain duplicate elements, so `Set` seems like the most semantically correct Java collection type to use here, and so that's the conventional practice in the Hibernate community. + +The catch associated with using a set is that we must carefully ensure that `Book` has a high-quality implementation of <>. +Now, that's not necessarily a bad thing, since a quality `equals()` is independently useful. + +But what if we used `Collection` or `List` instead? +Then our code would be much less sensitive to how `equals()` and `hashCode()` were implemented. 
+
+In the past, we were perhaps too dogmatic in recommending the use of `Set`.
+Now? I guess we're happy to let you guys decide.
+In hindsight, we could have done more to make clear that this was always a viable option.
+****
+
+[[one-to-one-fk]]
+=== One-to-one (first way)
+
+The simplest sort of one-to-one association is almost exactly like a `@ManyToOne` association, except that it maps to a foreign key column with a `UNIQUE` constraint.
+
+[TIP]
+// .One-to-many join table mappings
+====
+Later, we'll see how to map a one-to-one association to an <>.
+====
+
+A one-to-one association must be annotated `@OneToOne`:
+
+[source,java]
+----
+@Entity
+class Author {
+    @Id @GeneratedValue
+    Long id;
+
+    @OneToOne(optional=false, fetch=LAZY)
+    Person person;
+
+    ...
+}
+----
+
+Here, the `Author` table has a foreign key column holding the identifier of the associated `Person`.
+
+[TIP]
+// .One-to-one associations are a way to represent subtyping
+====
+A one-to-one association often models a "type of" relationship.
+In our example, an `Author` is a type of `Person`.
+An alternative—and often more natural—way to represent "type of" relationships in Java is via <>.
+====
+
+We can make this association bidirectional by adding a reference back to the `Author` in the `Person` entity:
+
+[source,java]
+----
+@Entity
+class Person {
+    @Id @GeneratedValue
+    Long id;
+
+    @OneToOne(mappedBy = Author_.PERSON)
+    Author author;
+
+    ...
+}
+----
+
+`Person.author` is the unowned side, because it's the side marked `mappedBy`.
+
+.Lazy fetching for one-to-one associations
+****
+Notice that we did not declare the unowned end of the association `fetch=LAZY`.
+That's because:
+
+1. not every `Person` has an associated `Author`, and
+2. the foreign key is held in the table mapped by `Author`, not in the table mapped by `Person`.
+
+Therefore, Hibernate can't tell if the reference from `Person` to `Author` is `null` without fetching the associated `Author`.
+
+On the other hand, if _every_ `Person` was an `Author`, that is, if the association were non-`optional`, we would not have to consider the possibility of `null` references, and we would map it like this:
+
+[source,java]
+----
+@OneToOne(optional=false, mappedBy = Author_.PERSON, fetch=LAZY)
+Author author;
+----
+****
+
+This is not the only sort of one-to-one association.
+
+[[one-to-one-pk]]
+=== One-to-one (second way)
+
+An arguably more elegant way to represent such a relationship is to share a primary key between the two tables.
+
+To use this approach, the `Author` class must be annotated like this:
+
+[source,java]
+----
+@Entity
+class Author {
+    @Id
+    Long id;
+
+    @OneToOne(optional=false, fetch=LAZY)
+    @MapsId
+    Person person;
+
+    ...
+}
+----
+
+Notice that, compared with the previous mapping:
+
+- the `@Id` attribute is no longer a `@GeneratedValue` and,
+- instead, the `person` association is annotated `@MapsId`.
+
+This lets Hibernate know that the association to `Person` is the source of primary key values for `Author`.
+
+Here, there's no extra foreign key column in the `Author` table, since the `id` column holds the identifier of `Person`.
+That is, the primary key of the `Author` table does double duty as the foreign key referring to the `Person` table.
+
+The `Person` class doesn't change.
+If the association is bidirectional, we annotate the unowned side `@OneToOne(mappedBy = Author_.PERSON)` just as before.
+
+[[many-to-many]]
+=== Many-to-many
+
+A unidirectional many-to-many association is represented as a collection-valued attribute.
+It always maps to a separate _association table_ in the database.
+
+It tends to happen that a many-to-many association eventually turns out to be an entity in disguise.
+
+[TIP]
+====
+Suppose we start with a nice clean many-to-many association between `Author` and `Book`.
+Later on, it's quite likely that we'll discover some additional information which comes attached to the association, so that the association table needs some extra columns.
+
+For example, imagine that we needed to report the percentage contribution of each author to a book.
+That information naturally belongs to the association table.
+We can't easily store it as an attribute of `Book`, nor as an attribute of `Author`.
+
+When this happens, we need to change our Java model, usually introducing a new entity class which maps the association table directly.
+In our example, we might call this entity something like `BookAuthorship`, and it would have `@OneToMany` associations to both `Author` and `Book`, along with the `contribution` attribute.
+
+We can evade the disruption occasioned by such "discoveries" by simply avoiding the use of `@ManyToMany` right from the start.
+There's little downside to representing every—or at least _almost_ every—logical many-to-many association using an intermediate entity.
+====
+
+A many-to-many association must be annotated `@ManyToMany`:
+
+[source,java]
+----
+@Entity
+class Author {
+    @Id @GeneratedValue
+    Long id;
+
+    @ManyToMany
+    Set<Book> books;
+
+    ...
+}
+----
+
+If the association is bidirectional, we add a very similar-looking attribute to `Book`, but this time we must specify `mappedBy` to indicate that this is the unowned side of the association:
+
+[source,java]
+----
+@Entity
+class Book {
+    @Id @GeneratedValue
+    Long id;
+
+    @ManyToMany(mappedBy=Author_.BOOKS)
+    Set<Author> authors;
+
+    ...
+}
+----
+
+Remember, if we wish to modify the collection we must <>.
+
+We've again used ``Set``s to represent the association.
+As before, we have the option to use `Collection` or `List`.
+But in this case it _does_ make a difference to the semantics of the association.
+
+[NOTE]
+// .Sets and bags
+====
+A many-to-many association represented as a `Collection` or `List` may contain duplicate elements.
+However, as before, the order of the elements is not persistent. +That is, the collection is a _bag_, not a set. +==== + +[[collections]] +=== Collections of basic values and embeddable objects + +We've now seen the following kinds of entity attribute: + +[%breakable,cols="32,^15,^15,~"] +|=== +| Kind of entity attribute | Kind of reference | Multiplicity | Examples + +| Single-valued attribute of basic type | Non-entity | At most one | `@Basic String name` +| Single-valued attribute of embeddable type | Non-entity | At most one | `@Embedded Name name` +| Single-valued association | Entity | At most one | +`@ManyToOne Publisher publisher` + +`@OneToOne Person person` +| Many-valued association | Entity | Zero or more | +`@OneToMany Set books` + +`@ManyToMany Set authors` +|=== + +Scanning this taxonomy, you might ask: does Hibernate have multivalued attributes of basic or embeddable type? + +Well, actually, we've already seen that it does, at least in two special cases. +So first, lets <> that JPA treats `byte[]` and `char[]` arrays as basic types. +Hibernate persists a `byte[]` or `char[]` array to a `VARBINARY` or `VARCHAR` column, respectively. + +But in this section we're really concerned with cases _other_ than these two special cases. +So then, _apart from ``byte[]`` and ``char[]``_, does Hibernate have multivalued attributes of basic or embeddable type? + +And the answer again is that _it does_. Indeed, there are two different ways to handle such a collection, by mapping it: + +- to a column of SQL `ARRAY` type (assuming the database has an `ARRAY` type), or +- to a separate table. 
+
+So we may expand our taxonomy with:
+
+[%breakable,cols="32,^15,^15,~"]
+|===
+| Kind of entity attribute | Kind of reference | Multiplicity | Examples
+
+| `byte[]` and `char[]` arrays | Non-entity | Zero or more |
+`byte[] image` +
+`char[] text`
+| Collection of basic-typed elements | Non-entity | Zero or more |
+`@Array String[] names` +
+`@ElementCollection Set<String> names`
+| Collection of embeddable elements | Non-entity | Zero or more | `@ElementCollection Set<Name> names`
+|===
+
+There are actually two new kinds of mapping here: `@Array` mappings, and `@ElementCollection` mappings.
+
+[%unbreakable]
+[CAUTION]
+// .These sorts of mappings are overused
+====
+These sorts of mappings are overused.
+
+There _are_ situations where we think it's appropriate to use a collection of basic-typed values in our entity class.
+But such situations are rare.
+Almost every many-valued relationship should map to a foreign key association between separate tables.
+And almost every table should be mapped by an entity class.
+
+The features we're about to meet in the next two subsections are used much more often by beginners than they're used by experts.
+So if you're a beginner, you'll save yourself some hassle by staying away from these features for now.
+====
+
+We'll talk about `@Array` mappings first.
+
+[[arrays]]
+=== Collections mapped to SQL arrays
+
+Let's consider a calendar event which repeats on certain days of the week.
+We might represent this in our `Event` entity as an attribute of type `DayOfWeek[]` or `List<DayOfWeek>`.
+Since the number of elements of this array or list is upper bounded by 7, this is a reasonable case for the use of an `ARRAY`-typed column.
+It's hard to see much value in storing this collection in a separate table.
+
+[%unbreakable]
+.Learning to not hate SQL arrays
+****
+For a long time, we thought arrays were a kind of weird and warty thing to add to the relational model, but recently we've come to realize that this view was overly closed-minded.
+Indeed, we might choose to view SQL `ARRAY` types as a generalization of `VARCHAR` and `VARBINARY` to generic "element" types. +And from this point of view, SQL arrays look quite attractive, at least for certain problems. +If we're comfortable mapping `byte[]` to `VARBINARY(255)`, why would we shy away from mapping `DayOfWeek[]` to `TINYINT ARRAY[7]`? +**** + +Unfortunately, JPA doesn't define a standard way to map SQL arrays, but here's how we can do it in Hibernate: + +[source, java] +---- +@Entity +class Event { + @Id @GeneratedValue + Long id; + ... + @Array(length=7) + DayOfWeek[] daysOfWeek; // stored as a SQL ARRAY type + ... +} +---- + +The `@Array` annotation is optional, but it's important to limit the amount of storage space the database allocates to the `ARRAY` column. + +[WARNING] +// .Not every database has an `ARRAY` type +==== +Now for the gotcha: not every database has a SQL `ARRAY` type, and some that _do_ have an `ARRAY` type don't allow it to be used as a column type. + +In particular, neither DB2 nor SQL Server have array-typed columns. +On these databases, Hibernate falls back to something much worse: it uses Java serialization to encode the array to a binary representation, and stores the binary stream in a `VARBINARY` column. +Quite clearly, this is terrible. +You can ask Hibernate to do something _slightly_ less terrible by annotating the attribute `@JdbcTypeCode(SqlTypes.JSON)`, so that the array is serialized to JSON instead of binary format. +But at this point it's better to just admit defeat and use an `@ElementCollection` instead. +==== + +Alternatively, we could store this array or list in a separate table. + +[[element-collections]] +=== Collections mapped to a separate table + +JPA _does_ define a standard way to map a collection to an auxiliary table: the `@ElementCollection` annotation. + +[source, java] +---- +@Entity +class Event { + @Id @GeneratedValue + Long id; + ... 
+    @ElementCollection
+    DayOfWeek[] daysOfWeek; // stored in a dedicated table
+    ...
+}
+----
+
+Actually, we shouldn't use an array here, since array types can't be <>, and so the JPA specification doesn't even say they're supported.
+Instead, we should use `Set`, `List`, or `Map`.
+
+[source, java]
+----
+@Entity
+class Event {
+    @Id @GeneratedValue
+    Long id;
+    ...
+    @ElementCollection
+    List<DayOfWeek> daysOfWeek; // stored in a dedicated table
+    ...
+}
+----
+
+Here, each collection element is stored as a separate row of the auxiliary table.
+By default, this table has the following definition:
+
+[source,sql]
+----
+create table Event_daysOfWeek (
+    Event_id bigint not null,
+    daysOfWeek tinyint check (daysOfWeek between 0 and 6),
+    daysOfWeek_ORDER integer not null,
+    primary key (Event_id, daysOfWeek_ORDER)
+)
+----
+
+Which is fine, but it's still a mapping we prefer to avoid.
+
+[%unbreakable]
+[WARNING]
+// .This is not what we would do
+====
+`@ElementCollection` is one of our least-favorite features of JPA.
+Even the name of the annotation is bad.
+
+The code above results in a table with three columns:
+
+- a foreign key of the `Event` table,
+- a `TINYINT` encoding the `enum`, and
+- an `INTEGER` encoding the ordering of elements in the array.
+
+Instead of a surrogate primary key, it has a composite key comprising the foreign key of `Event` and the order column.
+
+When—inevitably—we find that we need to add a fourth column to that table, our Java code must change completely.
+Most likely, we'll realize that we need to add a separate entity after all.
+So this mapping isn't very robust in the face of minor changes to our data model.
+====
+
+There's much more we could say about "element collections", but we won't say it, because we don't want to hand you the gun you'll shoot your foot with.
+
+[[entities-summary]]
+=== Summary of annotations
+
+Let's pause to remember the annotations we've met so far.
+ +.Declaring entities and embeddable types +[%breakable,cols="22,~,^13"] +|=== +| Annotation | Purpose | JPA-standard + +| `@Entity` | Declare an entity class | ✔ +| `@MappedSuperclass` | Declare a non-entity class with mapped attributes inherited by an entity | ✔ +| `@Embeddable` | Declare an embeddable type | ✔ +| `@IdClass` | declare the identifier class for an entity with multiple `@Id` attributes | ✔ +|=== + +.Declaring basic attributes +[%breakable,cols="22,~,^10,^13"] +|=== +| Annotation | Purpose | | JPA-standard + +| `@Id` | Declare a basic-typed identifier attribute | | ✔ +| `@Version` | Declare a version attribute | | ✔ +| `@Basic` | Declare a basic attribute | Default | ✔ +| `@EmbeddedId` | Declare an embeddable-typed identifier attribute | | ✔ +| `@Embedded` | Declare an embeddable-typed attribute | Inferred | ✔ +| `@Enumerated` | Declare an `enum`-typed attribute and specify how it is encoded | Inferred | ✔ +| `@Array` | Declare that an attribute maps to a SQL `ARRAY`, and specify the length | Inferred | ✖ +| `@ElementCollection` | Declare that a collection is mapped to a dedicated table | | ✔ +|=== + +.Converters and compositional basic types +[%breakable,cols="22,~,^13"] +|=== +| Annotation | Purpose | JPA-standard + +| `@Converter` | Register an `AttributeConverter` | ✔ +| `@Convert` | Apply a converter to an attribute | ✔ +| `@JavaType` | Explicitly specify an implementation of `JavaType` for a basic attribute | ✖ +| `@JdbcType` | Explicitly specify an implementation of `JdbcType` for a basic attribute | ✖ +| `@JdbcTypeCode` | Explicitly specify a JDBC type code used to determine the `JdbcType` for a basic attribute | ✖ +| `@JavaTypeRegistration` | Register a `JavaType` for a given Java type | ✖ +| `@JdbcTypeRegistration` | Register a `JdbcType` for a given JDBC type code | ✖ +|=== + +.System-generated identifiers +[%breakable,cols="22,~,^13"] +|=== +| Annotation | Purpose | JPA-standard + +| `@GeneratedValue` | Specify that an identifier is 
system-generated | ✔
+| `@SequenceGenerator` | Define an id generator backed by a database sequence | ✔
+| `@TableGenerator` | Define an id generator backed by a database table | ✔
+| `@IdGeneratorType` | Declare an annotation that associates a custom `Generator` with each `@Id` attribute it annotates | ✖
+| `@ValueGenerationType` | Declare an annotation that associates a custom `Generator` with each `@Basic` attribute it annotates | ✖
+|===
+
+.Declaring entity associations
+[%breakable,cols="22,~,^13"]
+|===
+| Annotation | Purpose | JPA-standard
+
+| `@ManyToOne` | Declare the single-valued side of a many-to-one association (the owning side) | ✔
+| `@OneToMany` | Declare the many-valued side of a many-to-one association (the unowned side) | ✔
+| `@ManyToMany` | Declare either side of a many-to-many association | ✔
+| `@OneToOne` | Declare either side of a one-to-one association | ✔
+| `@MapsId` | Declare that the owning side of a `@OneToOne` association maps the primary key column | ✔
+|===
+
+Phew!
+That's already a lot of annotations, and we have not even started with the annotations for O/R mapping!
+
+[[equals-and-hash]]
+=== `equals()` and `hashCode()`
+
+Entity classes should override `equals()` and `hashCode()`, especially when associations are <>.
+
+People new to Hibernate or JPA are often confused by exactly which fields should be included in the `hashCode()`.
+Even people with plenty of experience often argue quite religiously that one or another approach is the only right way.
+The truth is, there's no unique right way to do it, but there are some constraints.
+So please keep the following principles in mind:
+
+- You should not include a mutable field in the hashcode, since that would require rehashing every collection containing the entity whenever the field is mutated.
+- It's not completely wrong to include a generated identifier (surrogate key) in the hashcode, but since the identifier is not generated until the entity instance is made persistent, you must take great care to not add it to any hashed collection before the identifier is generated. We therefore advise against including any database-generated field in the hashcode. + +It's OK to include any immutable, non-generated field in the hashcode. + +TIP: We therefore recommend identifying a <> for each entity, that is, a combination of fields that uniquely identifies an instance of the entity, from the perspective of the data model of the program. The natural key should correspond to a unique constraint on the database, and to the fields which are included in `equals()` and `hashCode()`. + +[source,java] +---- +@Entity +class Book { + + @Id @GeneratedValue + Long id; + + @NaturalId + @Basic(optional=false) + String isbn; + + ... + + @Override + public boolean equals(Object other) { + return other instanceof Book + && ((Book) other).isbn.equals(isbn); + } + @Override + public int hashCode() { + return isbn.hashCode(); + } +} +---- + +That said, an implementation of `equals()` and `hashCode()` based on the generated identifier of the entity can work _if you're careful_. 
diff --git a/documentation/src/main/asciidoc/introduction/Hibernate_Introduction.adoc b/documentation/src/main/asciidoc/introduction/Hibernate_Introduction.adoc new file mode 100644 index 000000000000..a6dcdddf5165 --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Hibernate_Introduction.adoc @@ -0,0 +1,25 @@ +:shared-attributes-dir: ../shared/ + +include::{shared-attributes-dir}/common-attributes.adoc[] +include::{shared-attributes-dir}/url-attributes.adoc[] +include::{shared-attributes-dir}/filesystem-attributes.adoc[] +include::{shared-attributes-dir}/renderer-attributes.adoc[] + + += An Introduction to Hibernate 6 +:title-logo-image: image:../../style/asciidoctor/images/org/hibernate/logo.png[] +:toc: +:toclevels: 3 + +include::Preface.adoc[] + +:numbered: + +include::Introduction.adoc[] +include::Configuration.adoc[] +include::Entities.adoc[] +include::Mapping.adoc[] +include::Interacting.adoc[] +include::Tuning.adoc[] +include::Advanced.adoc[] +include::Credits.adoc[] diff --git a/documentation/src/main/asciidoc/introduction/Interacting.adoc b/documentation/src/main/asciidoc/introduction/Interacting.adoc new file mode 100644 index 000000000000..79d75132d6bc --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Interacting.adoc @@ -0,0 +1,1152 @@ +[[interacting]] +== Interacting with the database + +To interact with the database, that is, to execute queries, or to insert, update, or delete data, we need an instance of one of the following objects: + +- a JPA `EntityManager`, +- a Hibernate `Session`, or +- a Hibernate `StatelessSession`. + +The `Session` interface extends `EntityManager`, and so the only difference between the two interfaces is that `Session` offers a few more operations. 
+
+[TIP]
+// .The `Session` hiding inside an `EntityManager`
+====
+Actually, in Hibernate, every `EntityManager` is a `Session`, and you can narrow it like this:
+
+[source,java]
+----
+Session session = entityManager.unwrap(Session.class);
+----
+====
+
+An instance of `Session` (or of `EntityManager`) is a _stateful session_.
+It mediates the interaction between your program and the database via operations on a _persistence context_.
+
+In this chapter, we're not going to talk much about `StatelessSession`.
+We'll come back to <> when we talk about performance.
+What you need to know for now is that a stateless session doesn't have a persistence context.
+
+[TIP]
+// .Some people prefer `StatelessSession`
+====
+Still, we should let you know that some people prefer to use `StatelessSession` everywhere.
+It's a simpler programming model, and lets the developer interact with the database more _directly_.
+
+Stateful sessions certainly have their advantages, but they're more difficult to reason about, and when something goes wrong, the error messages can be more difficult to understand.
+====
+
+[[persistence-contexts]]
+=== Persistence Contexts
+
+A persistence context is a sort of cache; we sometimes call it the "first-level cache", to distinguish it from the <>.
+For every entity instance read from the database within the scope of a persistence context, and for every new entity made persistent within the scope of the persistence context, the context holds a unique mapping from the identifier of the entity instance to the instance itself.
+
+Thus, an entity instance may be in one of three states with respect to a given persistence context:
+
+1. _transient_ — never persistent, and not associated with the persistence context,
+2. _persistent_ — currently associated with the persistence context, or
+3. _detached_ — previously persistent in another session, but not currently associated with _this_ persistence context.
+ +image::images/entity-lifecyle.png[Entity lifecycle,width=800,align="center"] + +At any given moment, an instance may be associated with at most one persistence context. + +The lifetime of a persistence context usually corresponds to the lifetime of a transaction, though it's possible to have a persistence context that spans several database-level transactions that form a single logical unit of work. + +[WARNING] +==== +A persistence context—that is, a `Session` or `EntityManager`—absolutely positively **must not be shared between multiple threads or between concurrent transactions.** + +If you accidentally leak a session across threads, you will suffer. +==== + +.Container-managed persistence contexts +**** +In a container environment, the lifecycle of a persistence context scoped to the transaction will usually be managed for you. +**** + +There are several reasons we like persistence contexts. + +1. They help avoid _data aliasing_: if we modify an entity in one section of code, then other code executing within the same persistence context will see our modification. +2. They enable _automatic dirty checking_: after modifying an entity, we don't need to perform any explicit operation to ask Hibernate to propagate that change back to the database. + Instead, the change will be automatically synchronized with the database when the session is <>. +3. They can improve performance by avoiding a trip to the database when a given entity instance is requested repeatedly in a given unit of work. +4. They make it possible to _transparently batch_ together multiple database operations. + +A persistence context also allows us to detect circularities when performing operations on graphs of entities. +(Even in a stateless session, we need some sort of temporary cache of the entity instances we've visited while executing a query.) 
+
+On the other hand, stateful sessions come with some very important restrictions, since:
+
+- persistence contexts aren't threadsafe, and can't be shared across threads, and
+- a persistence context can't be reused across unrelated transactions, since that would break the isolation and atomicity of the transactions.
+
+Furthermore, a persistence context holds hard references to all its entities, preventing them from being garbage collected.
+Thus, the session must be discarded once a unit of work is complete.
+
+[IMPORTANT]
+// .This is important
+====
+If you don't completely understand the previous passage, go back and re-read it until you do.
+A great deal of human suffering has resulted from users mismanaging the lifecycle of the Hibernate `Session` or JPA `EntityManager`.
+====
+
+We'll conclude by noting that whether a persistence context helps or harms the performance of a given unit of work depends greatly on the nature of the unit of work.
+For this reason Hibernate provides both stateful and stateless sessions.
+
+[[creating-session]]
+=== Creating a session
+
+Sticking with standard JPA-defined APIs, we saw how to obtain an `EntityManagerFactory` in <>.
+It's quite unsurprising that we may use this object to create an `EntityManager`:
+
+[source,java]
+----
+EntityManager entityManager = entityManagerFactory.createEntityManager();
+----
+
+When we're finished with the `EntityManager`, we should explicitly clean it up:
+
+[source,java]
+----
+entityManager.close();
+----
+
+On the other hand, if we're starting from a `SessionFactory`, as described in <>, we may use:
+
+[source,java]
+----
+Session session = sessionFactory.openSession();
+----
+
+But we still need to clean up:
+
+[source,java]
+----
+session.close();
+----
+
+.Injecting the `EntityManager`
+****
+If you're writing code for some sort of container environment, you'll probably obtain the `EntityManager` by some sort of dependency injection.
+For example, in Java (or Jakarta) EE you would write: + +[source,java] +---- +@PersistenceContext EntityManager entityManager; +---- + +In Quarkus, injection is handled by CDI: + +[source,java] +---- +@Inject EntityManager entityManager; +---- +**** + +Outside a container environment, we'll also have to write code to manage database transactions. + +[[managing-transactions]] +=== Managing transactions + +Using JPA-standard APIs, the `EntityTransaction` interface allows us to control database transactions. +The idiom we recommend is the following: + +[source,java] +---- +EntityManager entityManager = entityManagerFactory.createEntityManager(); +EntityTransaction tx = entityManager.getTransaction(); +try { + tx.begin(); + //do some work + ... + tx.commit(); +} +catch (Exception e) { + if (tx.isActive()) tx.rollback(); + throw e; +} +finally { + entityManager.close(); +} +---- + +Using Hibernate's native APIs we might write something really similar, +// [source,java] +// ---- +// Session session = sessionFactory.openSession(); +// Transaction tx = null; +// try { +// tx = session.beginTransaction(); +// //do some work +// ... +// tx.commit(); +// } +// catch (Exception e) { +// if (tx!=null) tx.rollback(); +// throw e; +// } +// finally { +// session.close(); +// } +// ---- +but since this sort of code is extremely tedious, we have a much nicer option: + +[source,java] +---- +sessionFactory.inTransaction(session -> { + //do the work + ... +}); +---- + +.Container-managed transactions +**** +In a container environment, the container itself is usually responsible for managing transactions. +In Java EE or Quarkus, you'll probably indicate the boundaries of the transaction using the `@Transactional` annotation. +**** + +[[persistence-operations]] +=== Operations on the persistence context + +Of course, the main reason we need an `EntityManager` is to do stuff to the database. 
+The following important operations let us interact with the persistence context and schedule modifications to the data:
+
+.Methods for modifying data and managing the persistence context
+[%breakable,cols="30,~"]
+|===
+| Method name and parameters | Effect
+
+| `persist(Object)`
+| Make a transient object persistent and schedule a SQL `insert` statement for later execution
+| `remove(Object)`
+| Make a persistent object transient and schedule a SQL `delete` statement for later execution
+| `merge(Object)`
+| Copy the state of a given detached object to a corresponding managed persistent instance and return
+the persistent object
+| `detach(Object)`
+| Disassociate a persistent object from a session without
+affecting the database
+| `clear()`
+| Empty the persistence context and detach all its entities
+| `flush()`
+| Detect changes made to persistent objects associated with the session and synchronize the database state with the state of the session by executing SQL `insert`, `update`, and `delete` statements
+|===
+
+Notice that `persist()` and `remove()` have no immediate effect on the database, and instead simply schedule a command for later execution.
+Also notice that there's no `update()` operation for a stateful session.
+Modifications are automatically detected when the session is <>.
+ +On the other hand, the following operations all result in immediate access to the database: + +.Methods for reading and locking data +[%breakable,cols="30,~"] +|=== +| Method name and parameters | Effect + +| `find(Class,Object)` +| Obtain a persistent object given its type and its id +| `find(Class,Object,LockModeType)` +| Obtain a persistent object given its type and its id, requesting the given <> +| `getReference(Class,id)` +| Obtain a reference to a persistent object given its type and its id, without actually loading its state from the database +| `getReference(Object)` +| Obtain a reference to a persistent object with the same identity as the given detached instance, without actually loading its state from the database +| `refresh(Object)` +| Refresh the persistent state of an object using a new SQL `select` to retrieve its current state from the database +| `refresh(Object,LockModeType)` +| Refresh the persistent state of an object using a new SQL `select` to retrieve its current state from the database, requesting the given <> +| `lock(Object, LockModeType)` +| Obtain an <> on a persistent object +|=== + +Any of these operations might throw an exception. +Now, if an exception occurs while interacting with the database, there's no good way to resynchronize the state of the current persistence context with the state held in database tables. + +Therefore, a session is considered to be unusable after any of its methods throws an exception. + +[IMPORTANT] +// .The persistence context is fragile +==== +The persistence context is fragile. +If you receive an exception from Hibernate, you should immediately close and discard the current session. Open a new session if you need to, but throw the bad one away first. +==== + +Each of the operations we've seen so far affects a single entity instance passed as an argument. +But there's a way to set things up so that an operation will propagate to associated entities. 
+
+[[cascade]]
+=== Cascading persistence operations
+
+It's quite often the case that the lifecycle of a _child_ entity is completely dependent on the lifecycle of some _parent_.
+This is especially common for many-to-one and one-to-one associations, though it's very rare for many-to-many associations.
+
+For example, it's quite common to make an `Order` and all its ``Item``s persistent in the same transaction, or to delete a `Project` and its ``File``s at once.
+This sort of relationship is sometimes called a _whole/part_-type relationship.
+
+_Cascading_ is a convenience which allows us to propagate one of the operations listed in <> from a parent to its children.
+To set up cascading, we specify the `cascade` member of one of the association mapping annotations, usually `@OneToMany` or `@OneToOne`.
+
+[source,java]
+----
+@Entity
+class Order {
+    ...
+    @OneToMany(mappedBy=Item_.ORDER,
+               // cascade persist(), remove(), and refresh() from Order to Item
+               cascade={PERSIST,REMOVE,REFRESH},
+               // also remove() orphaned Items
+               orphanRemoval=true)
+    private Set<Item> items;
+    ...
+}
+----
+
+_Orphan removal_ indicates that an `Item` should be automatically deleted if it is removed from the set of items belonging to its parent `Order`.
+
+[[proxies-and-lazy-fetching]]
+=== Proxies and lazy fetching
+
+Our data model is a set of interconnected entities, and in Java our whole dataset would be represented as an enormous interconnected graph of objects.
+It's possible that this graph is disconnected, but more likely it's connected, or composed of a relatively small number of connected subgraphs.
+
+Therefore, when we retrieve an object belonging to this graph from the database and instantiate it in memory, we simply can't recursively retrieve and instantiate all its associated entities.
+Quite aside from the waste of memory on the VM side, this process would involve a huge number of round trips to the database server, or a massive multidimensional cartesian product of tables, or both. +Instead, we're forced to cut the graph somewhere. + +Hibernate solves this problem using _proxies_ and _lazy fetching_. +A proxy is an object that masquerades as a real entity or collection, but doesn't actually hold any state, because that state has not yet been fetched from the database. +When you call a method of the proxy, Hibernate will detect the call and fetch the state from the database before allowing the invocation to proceed to the real entity object or collection. + +Now for the gotchas: + +1. Hibernate will only do this for an entity which is currently associated with a persistence context. + Once the session ends, and the persistence context is cleaned up, the proxy is no longer fetchable, and instead its methods throw the hated `LazyInitializationException`. +2. A round trip to the database to fetch the state of a single entity instance is just about _the least efficient_ way to access data. + It almost inevitably leads to the infamous _N+1 selects_ problem we'll discuss later when we talk about how to <>. + +[TIP] +// .Strive to avoid triggering lazy fetching +==== +We're getting a bit ahead of ourselves here, but let's quickly mention the general strategy we recommend to navigate past these gotchas: + +- All associations should be set `fetch=LAZY` to avoid fetching extra data when it's not needed. + As we mentioned <>, this setting is not the default for `@ManyToOne` associations, and must be specified explicitly. +- But strive to avoid writing code which triggers lazy fetching. + Instead, fetch all the data you'll need upfront at the beginning of a unit of work, using one of the techniques described in <>, usually, using _join fetch_ in HQL or an `EntityGraph`. 
+====
+
+It's important to know that some operations which may be performed with an unfetched proxy _don't_ require fetching its state from the database.
+First, we're always allowed to obtain its identifier:
+
+[source,java]
+----
+var pubId = entityManager.find(Book.class, bookId).getPublisher().getId(); // does not fetch publisher
+----
+
+Second, we may create an association to a proxy:
+
+[source,java]
+----
+book.setPublisher(entityManager.getReference(Publisher.class, pubId)); // does not fetch publisher
+----
+
+Sometimes it's useful to test whether a proxy or collection has been fetched from the database.
+JPA lets us do this using the `PersistenceUnitUtil`:
+
+[source,java]
+----
+boolean authorsFetched = entityManagerFactory.getPersistenceUnitUtil().isLoaded(book.getAuthors());
+----
+
+Hibernate has a slightly easier way to do it:
+
+[source,java]
+----
+boolean authorsFetched = Hibernate.isInitialized(book.getAuthors());
+----
+
+But the static methods of the `Hibernate` class let us do a lot more, and it's worth getting a bit familiar with them.
+
+Of particular interest are the operations which let us work with unfetched collections without fetching their state from the database.
+For example, consider this code:
+
+[source,java]
+----
+Book book = session.find(Book.class, bookId); // fetch just the Book, leaving authors unfetched
+Author authorRef = session.getReference(Author.class, authorId); // obtain an unfetched proxy
+boolean isByAuthor = Hibernate.contains(book.getAuthors(), authorRef); // no fetching
+----
+
+This code fragment leaves both the set `book.authors` and the proxy `authorRef` unfetched.
+ +Finally, `Hibernate.initialize()` is a convenience method that force-fetches a proxy or collection: + +[source,java] +---- +Book book = session.find(Book.class, bookId); // fetch just the Book, leaving authors unfetched +Hibernate.initialize(book.getAuthors()); // fetch the Authors +---- + +But of course, this code is very inefficient, requiring two trips to the database to obtain data that could in principle be retrieved with just one query. + +It's clear from the discussion above that we need a way to request that an association be _eagerly_ fetched using a database `join`, thus protecting ourselves from the infamous N+1 selects. +One way to do that is by passing an `EntityGraph` to `find()`. + +[[entity-graph]] +=== Entity graphs and eager fetching + +When an association is mapped `fetch=LAZY`, it won't, by default, be fetched when we call the `find()` method. +We may request that an association be fetched eagerly (immediately) by passing an `EntityGraph` to `find()`. + +The JPA-standard API for this is a bit unwieldy: + +[source,java] +---- +var graph = entityManager.createEntityGraph(Book.class); +graph.addSubgraph(Book_.publisher); +Book book = entityManager.find(Book.class, bookId, Map.of(SpecHints.HINT_SPEC_FETCH_GRAPH, graph)); +---- + +This is untypesafe and unnecessarily verbose. +Hibernate has a better way: + +[source,java] +---- +var graph = session.createEntityGraph(Book.class); +graph.addSubgraph(Book_.publisher); +Book book = session.byId(Book.class).withFetchGraph(graph).load(bookId); +---- + +This code adds a `left outer join` to our SQL query, fetching the associated `Publisher` along with the `Book`. 
+ +We may even attach additional nodes to our `EntityGraph`: + +[source,java] +---- +var graph = session.createEntityGraph(Book.class); +graph.addSubgraph(Book_.publisher); +graph.addPluralSubgraph(Book_.authors).addSubgraph(Author_.person); +Book book = session.byId(Book.class).withFetchGraph(graph).load(bookId); + +---- + +This results in a SQL query with _four_ ``left outer join``s. + +[NOTE] +==== +In the code examples above, The classes `Book_` and `Author_` are generated by the <> we saw earlier. +They let us refer to attributes of our model in a completely type-safe way. +We'll use them again, below, when we talk about <>. +==== + +JPA specifies that any given `EntityGraph` may be interpreted in two different ways. + +- A _fetch graph_ specifies exactly the associations that should be eagerly loaded. + Any association not belonging to the entity graph is proxied and loaded lazily only if required. +- A _load graph_ specifies that the associations in the entity graph are to be fetched in addition to the associations mapped `fetch=EAGER`. + +You're right, the names make no sense. +But don't worry, if you take our advice, and map your associations `fetch=LAZY`, there's no difference between a "fetch" graph and a "load" graph, so the names don't matter. + +[NOTE] +==== +JPA even specifies a way to define named entity graphs using annotations. +But the annotation-based API is so verbose that it's just not worth using. +==== + +[[flush]] +=== Flushing the session + +From time to time, a _flush_ operation is triggered, and the session synchronizes dirty state held in memory—that is, modifications to the state of entities associated with the persistence context—with persistent state held in the database. Of course, it does this by executing SQL `INSERT`, `UPDATE`, and `DELETE` statements. 
+
+By default, a flush is triggered when:
+
+- the current transaction commits, for example, when `Transaction.commit()` is called,
+- before execution of a query whose result would be affected by the synchronization of dirty state held in memory, or
+- when the program directly calls `flush()`.
+
+[NOTE]
+// .SQL execution happens asynchronously
+====
+Notice that SQL statements are not usually executed synchronously by methods of the `Session` interface like `persist()` and `remove()`. If synchronous execution of SQL is desired, the `StatelessSession` allows this.
+====
+
+This behavior can be controlled by explicitly setting the flush mode.
+For example, to disable flushes that occur before query execution, call:
+
+[source,java]
+----
+entityManager.setFlushMode(FlushModeType.COMMIT);
+----
+
+Hibernate allows greater control over the flush mode than JPA:
+
+[source,java]
+----
+session.setHibernateFlushMode(FlushMode.MANUAL);
+----
+
+Since flushing is a somewhat expensive operation (the session must dirty-check every entity in the persistence context), setting the flush mode to `COMMIT` can occasionally be a useful optimization.
+
+.Flush modes
+[%breakable,cols="15,15,~"]
+|===
+| Hibernate `FlushMode` | JPA `FlushModeType` | Interpretation
+
+| `MANUAL` | | Never flush automatically
+| `COMMIT` | `COMMIT` | Flush before transaction commit
+| `AUTO` | `AUTO` | Flush before transaction commit, and before execution of a query whose results might be affected by modifications held in memory
+| `ALWAYS` | | Flush before transaction commit, and before execution of every query
+|===
+
+A second way to reduce the cost of flushing is to load entities in _read-only_ mode:
+
+- `Session.setDefaultReadOnly(true)` specifies that all entities loaded by a given session should be loaded in read-only mode by default,
+- `SelectionQuery.setReadOnly(true)` specifies that every entity returned by a given query should be loaded in read-only mode, and
+- `Session.setReadOnly(Object, true)` specifies that a given entity already loaded by the session should be switched to read-only mode.
+
+It's not necessary to dirty-check an entity instance in read-only mode.
+
+[[queries]]
+=== Queries
+
+:hql: {doc-user-guide-url}#query-language
+
+Hibernate features three complementary ways to write queries:
+
+- the _Hibernate Query Language_, an extremely powerful superset of JPQL, which abstracts most of the features of modern dialects of SQL,
+- the JPA _criteria query_ API, along with extensions, allowing almost any HQL query to be constructed programmatically via a typesafe API, and, of course
+- for when all else fails, _native SQL_ queries.
+
+[[hql-queries]]
+=== HQL queries
+
+:hql: {doc-user-guide-url}#query-language
+
+A full discussion of the query language would require just as much text as the rest of this Introduction.
+Fortunately, HQL is already described in exhaustive (and exhausting) detail in _A guide to Hibernate Query Language 6_.
+It doesn't make sense to repeat that information here.
+// The query language is discussed in great detail below in <>.
+
+Here we want to see how to execute a query via the `Session` or `EntityManager` API.
+The method we call depends on what kind of query it is:
+
+- _selection queries_ return a result list, but do not modify the data, but
+- _mutation queries_ modify data, and return the number of modified rows.
+
+Selection queries usually start with the keyword `select` or `from`, whereas mutation queries begin with the keyword `insert`, `update`, or `delete`.
+
+.Executing HQL
+[%breakable,cols="10,36,32,22"]
+|===
+| Kind | `Session` method | `EntityManager` method | `Query` execution method

+| Selection | `createSelectionQuery(String,Class)` | `createQuery(String,Class)` | `getResultList()`, `getSingleResult()`, or `getSingleResultOrNull()`
+| Mutation | `createMutationQuery(String)` | `createQuery(String)` | `executeUpdate()`
+|===
+
+So for the `Session` API we would write:
+
+[source,java]
+----
+List<Book> matchingBooks =
+        session.createSelectionQuery("from Book where title like :titleSearchPattern", Book.class)
+            .setParameter("titleSearchPattern", titleSearchPattern)
+            .getResultList();
+----
+
+Or, if we're sticking to the JPA-standard APIs:
+
+[source,java]
+----
+List<Book> matchingBooks =
+        entityManager.createQuery("select b from Book b where b.title like :titleSearchPattern", Book.class)
+            .setParameter("titleSearchPattern", titleSearchPattern)
+            .getResultList();
+----
+
+The only difference between `createSelectionQuery()` and `createQuery()` is that `createSelectionQuery()` throws an exception if passed an `insert`, `delete`, or `update`.
+
+In the query above, `:titleSearchPattern` is called a _named parameter_.
+We may also identify parameters by a number.
+These are called _ordinal parameters_.
+
+[source,java]
+----
+List<Book> matchingBooks =
+        session.createSelectionQuery("from Book where title like ?1", Book.class)
+            .setParameter(1, titleSearchPattern)
+            .getResultList();
+----
+
+When a query has multiple parameters, named parameters tend to be easier to read, even if slightly more verbose.
+
+[WARNING]
+// .Using parameters to avoid injection attacks
+====
+_Never_ concatenate user input with HQL and pass the concatenated string to `createSelectionQuery()`.
+This would open up the possibility for an attacker to execute arbitrary code on your database server.
+====
+
+If we're expecting a query to return a single result, we can use `getSingleResult()`.
+
+[source,java]
+----
+Book book =
+        session.createSelectionQuery("from Book where isbn = ?1", Book.class)
+            .setParameter(1, isbn)
+            .getSingleResult();
+----
+
+Or, if we're expecting it to return at most one result, we can use `getSingleResultOrNull()`.
+
+[source,java]
+----
+Book bookOrNull =
+        session.createSelectionQuery("from Book where isbn = ?1", Book.class)
+            .setParameter(1, isbn)
+            .getSingleResultOrNull();
+----
+
+The difference, of course, is that `getSingleResult()` throws an exception if there's no matching row in the database, whereas `getSingleResultOrNull()` just returns `null`.
+
+By default, Hibernate dirty checks entities in the persistence context before executing a query, in order to determine if the session should be flushed.
+If there are many entities associated with the persistence context, then this can be an expensive operation.
+
+To disable this behavior, set the flush mode to `COMMIT` or `MANUAL`:
+
+[source,java]
+----
+Book bookOrNull =
+        session.createSelectionQuery("from Book where isbn = ?1", Book.class)
+            .setParameter(1, isbn)
+            .setHibernateFlushMode(MANUAL)
+            .getSingleResult();
+----
+
+[CAUTION]
+====
+Setting the flush mode to `COMMIT` or `MANUAL` might cause the query to return stale results.
+==== + +Occasionally we need to build a query at runtime, from a set of optional conditions. +For this, JPA offers an API which allows programmatic construction of a query. + +[[criteria-queries]] +=== Criteria queries + +Imagine we're implementing some sort of search screen, where the user of our system is offered several different ways to constrain the query result set. +For example, we might let them search for books by title and/or the author name. +Of course, we could construct a HQL query by string concatenation, but this is a bit fragile, so it's quite nice to have an alternative. + +.HQL is implemented in terms of criteria objects +**** +Actually, in Hibernate 6, every HQL query is compiled to a criteria query before being translated to SQL. +This ensures that the semantics of HQL and criteria queries are identical. +**** + +First we need an object for building criteria queries. +Using the JPA-standard APIs, this would be a `CriteriaBuilder`, and we get it from the `EntityManagerFactory`: + +[source,java] +---- +CriteriaBuilder builder = entityManagerFactory.getCriteriaBuilder(); +---- + +But if we have a `SessionFactory`, we get something much better, a `HibernateCriteriaBuilder`: + +[source,java] +---- +HibernateCriteriaBuilder builder = sessionFactory.getCriteriaBuilder(); +---- + +The `HibernateCriteriaBuilder` extends `CriteriaBuilder` and adds many operations that JPQL doesn't have. + +[TIP] +// .Getting a `HibernateCriteriaBuilder` in JPA +==== +If you're using `EntityManagerFactory`, don't despair, you have two perfectly good ways to obtain the `HibernateCriteriaBuilder` associated with that factory. +Either: + +[source,java] +---- +HibernateCriteriaBuilder builder = + entityManagerFactory.unwrap(SessionFactory.class).getCriteriaBuilder(); +---- + +Or simply: + +[source,java] +---- +HibernateCriteriaBuilder builder = + (HibernateCriteriaBuilder) entityManagerFactory.getCriteriaBuilder(); +---- +==== + +We're ready to create a criteria query. 
+
+[source,java]
+----
+CriteriaQuery<Book> query = builder.createQuery(Book.class);
+Root<Book> book = query.from(Book.class);
+Predicate where = builder.conjunction();
+if (titlePattern != null) {
+    where = builder.and(where, builder.like(book.get(Book_.title), titlePattern));
+}
+if (namePattern != null) {
+    Join<Book,Author> author = book.join(Book_.author);
+    where = builder.and(where, builder.like(author.get(Author_.name), namePattern));
+}
+query.select(book).where(where)
+    .orderBy(builder.asc(book.get(Book_.title)));
+----
+
+Here, as before, the classes `Book_` and `Author_` are generated by Hibernate's <>.
+
+[NOTE]
+// .Injection attacks and criteria queries
+====
+Notice that we did not bother treating `titlePattern` and `namePattern` as parameters.
+That's safe because, by default, Hibernate automatically and transparently treats strings passed to the `CriteriaBuilder` as JDBC parameters.
+====
+
+Execution of a criteria query works almost exactly like execution of HQL.
+
+.Executing criteria queries
+[%breakable,cols="10,36,32,22"]
+|===
+| Kind | `Session` method | `EntityManager` method | `Query` execution method

+| Selection | `createSelectionQuery(CriteriaQuery)` | `createQuery(CriteriaQuery)` | `getResultList()`, `getSingleResult()`, or `getSingleResultOrNull()`
+| Mutation | `createMutationQuery(CriteriaUpdate)` or `createMutationQuery(CriteriaDelete)` | `createQuery(CriteriaUpdate)` or `createQuery(CriteriaDelete)` | `executeUpdate()`
+|===
+
+For example:
+
+[source,java]
+----
+List<Book> matchingBooks =
+        session.createSelectionQuery(query)
+            .getResultList();
+----
+
+Update, insert, and delete queries work similarly:
+
+[source,java]
+----
+CriteriaDelete<Book> delete = builder.createCriteriaDelete(Book.class);
+Root<Book> book = delete.from(Book.class);
+delete.where(builder.lt(builder.year(book.get(Book_.publicationDate)), 2000));
+session.createMutationQuery(delete).executeUpdate();
+----
+
+When all else fails, and sometimes even before that, we're left with the option of writing a query in SQL.
+
+[[native-queries]]
+=== Native SQL queries
+
+HQL is a powerful language which helps reduce the verbosity of SQL, and significantly increases portability of queries between databases.
+But ultimately, the true value of ORM is not in avoiding SQL, but in alleviating the pain involved in dealing with SQL result sets once we get them back to our Java program.
+As we said <>, Hibernate's generated SQL is meant to be used in conjunction with handwritten SQL, and native SQL queries are one of the facilities we provide to make that easy.
+
+.Executing SQL
+[%breakable,cols="10,36,32,22"]
+|===
+| Kind | `Session` method | `EntityManager` method | `Query` execution method

+| Selection | `createNativeQuery(String,Class)` | `createNativeQuery(String,Class)` | `getResultList()`, `getSingleResult()`, or `getSingleResultOrNull()`
+| Mutation | `createNativeMutationQuery(String)` | `createNativeQuery(String)` | `executeUpdate()`
+| Stored procedure | `createStoredProcedureCall(String)` | `createStoredProcedureQuery(String)` | `execute()`
+|===
+
+For the most simple cases, Hibernate can infer the shape of the result set:
+
+[source, java]
+----
+Book book =
+        session.createNativeQuery("select * from Books where isbn = ?1", Book.class)
+            .getSingleResult();
+
+String title =
+        session.createNativeQuery("select title from Books where isbn = ?1", String.class)
+            .getSingleResult();
+----
+
+However, in general, there isn't enough information in the JDBC `ResultSetMetaData` to infer the mapping of columns to entity objects.
+So for more complicated cases, you'll need to use the `@SqlResultSetMapping` annotation to define a named mapping, and pass the name to `createNativeQuery()`. This gets fairly messy, so we don't want to hurt your eyes by showing you an example of it.
+
+By default, Hibernate doesn't flush the session before execution of a native query.
+That's because the session is unaware of which modifications held in memory would affect the results of the query.
+
+So if there are any unflushed changes to ``Book``s, this query might return stale data:
+
+[source,java]
+----
+List<Book> books =
+        session.createNativeQuery("select * from Books")
+            .getResultList();
+----
+
+There's two ways to ensure the persistence context is flushed before this query is executed.
+
+Either, we could simply force a flush by setting the flush mode to `ALWAYS`:
+
+[source,java]
+----
+List<Book> books =
+        session.createNativeQuery("select * from Books")
+            .setHibernateFlushMode(ALWAYS)
+            .getResultList();
+----
+
+Or, alternatively, we could tell Hibernate which modified state affects the results of the query:
+
+[source,java]
+----
+List<Book> books =
+        session.createNativeQuery("select * from Books")
+            .addSynchronizedEntityClass(Book.class)
+            .getResultList();
+----
+
+[TIP]
+====
+You can call stored procedures using `createStoredProcedureQuery()` or `createStoredProcedureCall()`.
+====
+
+[[pagination]]
+=== Limits, pagination, and ordering
+
+If a query might return more results than we can handle at one time, we may specify:
+
+- a _limit_ on the maximum number of rows returned, and,
+- optionally, an _offset_, the first row of an ordered result set to return.
+
+[TIP]
+====
+The offset is used to paginate query results.
+====
+
+There's two ways to add a limit or offset to a HQL or native SQL query:
+
+- using the syntax of the query language itself, for example, `offset 10 rows fetch next 20 rows only`, or
+- using the methods `setFirstResult()` and `setMaxResults()` of the `SelectionQuery` interface.
+
+If the limit or offset is parameterized, the second option is simpler.
+For example, this: + +[source,java] +---- +List books = + session.createSelectionQuery("from Book where title like ?1 order by title") + .setParameter(1, titlePattern) + .setMaxResults(MAX_RESULTS) + .getResultList(); +---- + +is simpler than: + +[source,java] +---- +List books = + session.createSelectionQuery("from Book where title like ?1 order by title fetch first ?2 rows only") + .setParameter(1, titlePattern) + .setParameter(2, MAX_RESULTS) + .getResultList(); +---- + +Hibernate's `SelectionQuery` has a slightly different way to paginate the query results: + +[source,java] +---- +List books = + session.createSelectionQuery("from Book where title like ?1 order by title") + .setParameter(1, titlePattern) + .setMaxResults(MAX_RESULTS) + .getResultList(); +---- + +A closely-related issue is ordering, for which Hibernate ORM 6.3 offers an incubating API. + +Unfortunately, there's no way to do this using JPA's `TypedQuery` interface. + +.Methods for query limits, pagination, and ordering +[%breakable,cols="30,~,^15"] +|=== +| Method name | Purpose | JPA-standard + +| `setMaxResults()` | Set a limit on the number of results returned by a query | ✔ +| `setFirstResult()` | Set an offset on the results returned by a query | ✔ +|=== + +[[projection-lists]] +=== Representing projection lists + +A _projection list_ is the list of things that a query returns, that is, the list of expressions in the `select` clause. +Since Java has no tuple types, representing query projection lists in Java has always been a problem for JPA and Hibernate. +Traditionally, we've just used `Object[]` most of the time: + +[source,java] +---- +var results = + session.createSelectionQuery("select isbn, title from Book", Object[].class) + .getResultList(); + +for (var result : results) { + var isbn = (String) result[0]; + var title = (String) result[1]; + ... +} +---- + +This is really a bit ugly. 
+Java's `record` types now offer an interesting alternative: + +[source,java] +---- +record IsbnTitle(String isbn, String title) {} + +var results = + session.createSelectionQuery("select isbn, title from Book", IsbnTitle.class) + .getResultList(); + +for (var result : results) { + var isbn = result.isbn(); + var title = result.title(); + ... +} +---- +Notice that we're able to declare the `record` right before the line which executes the query. + +Now, this is only _superficially_ more typesafe, since the query itself is not checked statically, and so we can't say it's objectively better. +But perhaps you find it more aesthetically pleasing. +And if we're going to be passing query results around the system, the use of a `record` type is _much_ better. + +The criteria query API offers a much more satisfying solution to the problem. +Consider the following code: + +[source,java] +---- +var builder = sessionFactory.getCriteriaBuilder(); +var query = builder.createTupleQuery(); +var book = query.from(Book.class); +var bookTitle = book.get(Book_.title); +var bookIsbn = book.get(Book_.isbn); +var bookPrice = book.get(Book_.price); +query.select(builder.tuple(bookTitle, bookIsbn, bookPrice)); +var resultList = session.createSelectionQuery(query).getResultList(); +for (var result: resultList) { + String title = result.get(bookTitle); + String isbn = result.get(bookIsbn); + BigDecimal price = result.get(bookPrice); + ... +} +---- + +This code is manifestly completely typesafe, and much better than we can hope to do with HQL. + +[[named-queries]] +=== Named queries + +The `@NamedQuery` annotation lets us define a HQL query that is compiled and checked as part of the bootstrap process. +This means we find out about errors in our queries earlier, instead of waiting until the query is actually executed. +We can place the `@NamedQuery` annotation on any class, even on an entity class. 
+ +[source,java] +---- +@NamedQuery(name="10BooksByTitle", + query="from Book where title like :titlePattern order by title fetch first 10 rows only") +class BookQueries {} +---- + +We have to make sure that the class with the `@NamedQuery` annotation will be scanned by Hibernate, either: + +- by adding `org.hibernate.example.BookQueries` to `persistence.xml`, or +- by calling `configuration.addClass(BookQueries.class)`. + +[TIP] +==== +Unfortunately, JPA's `@NamedQuery` annotation can't be placed on a package descriptor. +Therefore, Hibernate provides a very similar annotation, `@org.hibernate.annotations.NamedQuery` which _can_ be specified at the package level. +If we declare a named query at the package level, we must call: +[source,java] +---- +configuration.addPackage("org.hibernate.example") +---- +so that Hibernate knows where to find it. +==== + +The `@NamedNativeQuery` annotation lets us do the same for native SQL queries. +There's much less advantage to using `@NamedNativeQuery`, because there is very little that Hibernate can do to validate the correctness of a query written in the native SQL dialect of your database. + +.Executing named queries +[%breakable,cols="10,36,32,22"] +|=== +| Kind | `Session` method | `EntityManager` method | `Query` execution method + +| Selection | `createNamedSelectionQuery(String,Class)` | `createNamedQuery(String,Class)` | `getResultList()`, `getSingleResult()`, or `getSingleResultOrNull()` +| Mutation | `createNamedMutationQuery(String)` | `createNamedQuery(String)` | `executeUpdate()` +|=== + +We execute our named query like this: + +[source,java] +---- +List books = + entityManager.createNamedQuery("10BooksByTitle") + .setParameter("titlePattern", titlePattern) + .getResultList() +---- + +Note that the code which executes the named query is not aware of whether the query was written in HQL or in native SQL, making it slightly easier to change and optimize the query later. 
+ +[TIP] +==== +:query-validator: https://github.com/hibernate/query-validator/ + +It's nice to have our queries checked at startup time. +It's even better to have them checked at compile time. +Back in <>, we mentioned that the {query-validator}[Query Validator] can do that for us. +In fact, the Query Validator will even check HQL query strings that occur as arguments to `createQuery()` and friends. +So if we use the Query Validator, there's not much advantage to the use of named queries. +==== + +[[load-access]] +=== Controlling lookup by id + +We can do almost anything via HQL, criteria, or native SQL queries. +But when we already know the identifier of the entity we need, a query can feel like overkill. +And queries don't make efficient use of the <>. + +We met the <> method earlier. +It's the most basic way to perform a _lookup_ by id. +But as we also <>, it can't quite do everything. +Therefore, Hibernate has some APIs that streamline certain more complicated lookups: + +.Operations for lookup by id +[%breakable,cols="30,~"] +|=== +| Method name | Purpose + +| `byId()` | Lets us specify association fetching via an `EntityGraph`, as we saw; also lets us specify some additional options, including how the lookup <>, and whether the entity should be loaded in read-only mode +| `byMultipleIds()` | Lets us load a _batch_ of ids at the same time +|=== + +Batch loading is very useful when we need to retrieve multiple instances of the same entity class by id: + +[source,java] +---- +var graph = session.createEntityGraph(Book.class); +graph.addSubgraph(Book_.publisher); + +List books = + session.byMultipleIds(Book.class) + .withFetchGraph(graph) // control association fetching + .withBatchSize(20) // specify an explicit batch size + .with(CacheMode.GET) // control interaction with the cache + .multiLoad(bookIds); +---- + +The given list of `bookIds` will be broken into batches, and each batch will be fetched from the database in a single `select`. 
+If we don't specify the batch size explicitly, a batch size will be chosen automatically.
+
+We also have some operations for working with lookups by <>:
+
+[%breakable,cols="30,~"]
+|===
+| Method name | Purpose

+| `bySimpleNaturalId()` | For an entity with just one attribute annotated `@NaturalId`
+| `byNaturalId()` | For an entity with multiple attributes annotated `@NaturalId`
+| `byMultipleNaturalId()` | Lets us load a _batch_ of natural ids at the same time
+|===
+
+Here's how we can retrieve an entity by its composite natural id:
+
+[source,java]
+----
+Book book =
+        session.byNaturalId(Book.class)
+            .using(Book_.isbn, isbn)
+            .using(Book_.printing, printing)
+            .load();
+----
+
+Notice that this code fragment is completely typesafe, again thanks to the <>.
+
+[[jdbc]]
+=== Interacting directly with JDBC
+
+From time to time we run into the need to write some code that calls JDBC directly.
+Unfortunately, JPA offers no good way to do this, but the Hibernate `Session` does.
+
+[source,java]
+----
+session.doWork(connection -> {
+    try (var callable = connection.prepareCall("{call myproc(?)}")) {
+        callable.setLong(1, argument);
+        callable.execute();
+    }
+});
+----
+
+The `Connection` passed to the work is the same connection being used by the session, and so any work performed using that connection occurs in the same transaction context.
+
+If the work returns a value, use `doReturningWork()` instead of `doWork()`.
+
+[TIP]
+====
+In a container environment where transactions and database connections are managed by the container, this might not be the easiest way to obtain the JDBC connection.
+====
+
+[[advice]]
+=== What to do when things go wrong
+
+Object/relational mapping has been called the "Vietnam of computer science".
+The person who made this analogy is American, and so one supposes that he meant to imply some kind of unwinnable war.
+This is quite ironic, since at the very moment he made this comment, Hibernate was already on the brink of winning the war. + +Today, Vietnam is a peaceful country with exploding per-capita GDP, and ORM is a solved problem. +That said, Hibernate is complex, and ORM still presents many pitfalls for the inexperienced, even occasionally for the experienced. +Sometimes things go wrong. + +In this section we'll quickly sketch some general strategies for avoiding "quagmires". + +- Understand SQL and the relational model. + Know the capabilities of your RDBMS. + Work closely with the DBA if you're lucky enough to have one. + Hibernate is not about "transparent persistence" for Java objects. + It's about making two excellent technologies work smoothly together. +- <> executed by Hibernate. + You cannot know that your persistence logic is correct until you've actually inspected the SQL that's being executed. + Even when everything seems to be "working", there might be a lurking <>. +- Be careful when <>. + In principle, you should update _both ends_ of the association. + But Hibernate doesn't strictly enforce that, since there are some situations where such a rule would be too heavy-handed. + Whatever the case, it's up to you to maintain consistency across your model. +- Never <> across threads or concurrent transactions. + Have a strategy or framework to guarantee this never happens. +- When running queries that return large result sets, take care to consider the size of the <>. + Consider using a <>. +- Think carefully about the semantics of the <>, and how the caching policies impact transaction isolation. +- Avoid fancy bells and whistles you don't need. + Hibernate is incredibly feature-rich, and that's a good thing, because it serves the needs of a huge number of users, many of whom have one or two very specialized needs. + But nobody has _all_ those specialized needs. + In all probability, you have none of them. 
+ Write your domain model in the simplest way that's reasonable, using the simplest mapping strategies that make sense. +- When something isn't behaving as you expect, _simplify_. + Isolate the problem. + Find the absolute minimum test case which reproduces the behavior, _before_ asking for help online. + Most of the time, the mere act of isolating the problem will suggest an obvious solution. +- Avoid frameworks and libraries that "wrap" JPA. + If there's any one criticism of Hibernate and ORM that sometimes _does_ ring true, it's that it takes you too far from direct control over JDBC. + An additional layer just takes you even further. +- Avoid copy/pasting code from random bloggers or stackoverflow reply guys. + Many of the suggestions you'll find online just aren't the simplest solution, and many aren't correct for Hibernate 6. + Instead, _understand_ what you're doing; study the Javadoc of the APIs you're using; read the JPA specification; follow the advice we give in this document; go direct to the Hibernate team on Zulip. + (Sure, we can be a bit cantankerous at times, but we _do_ always want you to be successful.) +- Always consider other options. + You don't have to use Hibernate for _everything_. + diff --git a/documentation/src/main/asciidoc/introduction/Introduction.adoc b/documentation/src/main/asciidoc/introduction/Introduction.adoc new file mode 100644 index 000000000000..50645b28cfb9 --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Introduction.adoc @@ -0,0 +1,779 @@ +[[introduction]] +== Introduction + +Hibernate is usually described as a library that makes it easy to map Java classes to relational database tables. +But this formulation does no justice to the central role played by the relational data itself. +So a better description might be: + +**** +Hibernate makes *relational data* visible to a program written in Java, in a *natural* and *typesafe* form, + +1. 
making it easy to write complex queries and work with their results, +2. letting the program easily synchronize changes made in memory with the database, respecting the ACID properties of transactions, and +3. allowing performance optimizations to be made after the basic persistence logic has already been written. +**** + +Here the relational data is the focus, along with the importance of typesafety. +The goal of _Object/relational mapping_ (ORM) is to eliminate fragile and untypesafe code, and make large programs easier to maintain in the long run. + +ORM takes the pain out of persistence by relieving the developer of the need to hand-write tedious, repetitive, and fragile code for flattening graphs of objects to database tables and rebuilding graphs of objects from flat SQL query result sets. +Even better, ORM makes it much easier to tune performance later, after the basic persistence logic has already been written. + +[TIP] +// .ORM or SQL? +==== +A perennial question is: should I use ORM, or plain SQL? +The answer is usually: _use both_. +JPA and Hibernate were designed to work _in conjunction with_ handwritten SQL. +You see, most programs with nontrivial data access logic will benefit from the use of ORM at least _somewhere_. +But if Hibernate is making things more difficult, for some particularly tricky piece of data access logic, the only sensible thing to do is to use something better suited to the problem! +Just because you're using Hibernate for persistence doesn't mean you have to use it for _everything_. +==== + +Developers often ask about the relationship between Hibernate and JPA, so let's take a short detour into some history. + +[[hibernate-and-jpa]] +=== Hibernate and JPA + +Hibernate was the inspiration behind the _Java_ (now _Jakarta_) _Persistence API_, or JPA, and includes a complete implementation of the latest revision of this specification. 
+
+.The early history of Hibernate and JPA
+****
+The Hibernate project began in 2001, when Gavin King's frustration with Entity Beans in EJB 2 boiled over.
+It quickly overtook other open source and commercial contenders to become the most popular persistence solution for Java, and the book _Hibernate in Action_, written with Christian Bauer, was an influential bestseller.
+
+In 2004, Gavin and Christian joined a tiny startup called JBoss, and other early Hibernate contributors soon followed: Max Rydahl Andersen, Emmanuel Bernard, Steve Ebersole, and Sanne Grinovero.
+
+Soon after, Gavin joined the EJB 3 expert group and convinced the group to deprecate Entity Beans in favor of a brand-new persistence API modelled after Hibernate.
+Later, members of the TopLink team got involved, and the Java Persistence API evolved as a collaboration between—primarily—Sun, JBoss, Oracle, and Sybase, under the leadership of Linda DeMichiel.
+
+Over the intervening two decades, _many_ talented people have contributed to the development of Hibernate.
+We're all especially grateful to Steve, who has led the project for many years, since Gavin stepped back to focus on other work.
+****
+
+We can think of the API of Hibernate in terms of three basic elements:
+
+- an implementation of the JPA-defined APIs, most importantly, of the interfaces `EntityManagerFactory` and `EntityManager`, and of the JPA-defined O/R mapping annotations,
+- a _native API_ exposing the full set of available functionality, centered around the interfaces `SessionFactory`, which extends `EntityManagerFactory`, and `Session`, which extends `EntityManager`, and
+- a set of _mapping annotations_ which augment the O/R mapping annotations defined by JPA, and which may be used with the JPA-defined interfaces, or with the native API.
+
+Hibernate also offers a range of SPIs for frameworks and libraries which extend or integrate with Hibernate, but we're not interested in any of that stuff here.
+
+image::images/api-overview.png[API overview,width=700,align="center"]
+
+As an application developer, you must decide whether to:
+
+- write your program in terms of `Session` and `SessionFactory`, or
+- maximize portability to other implementations of JPA by, wherever reasonable, writing code in terms of `EntityManager` and `EntityManagerFactory`, falling back to the native APIs only where necessary.
+
+Whichever path you take, you will use the JPA-defined mapping annotations most of the time, and the Hibernate-defined annotations for more advanced mapping problems.
+
+[TIP]
+// .Developing with "pure" JPA
+====
+You might wonder if it's possible to develop an application using _only_ JPA-defined APIs, and, indeed, that's possible in principle.
+JPA is a great baseline that really nails the basics of the object/relational mapping problem.
+But without the native APIs, and extended mapping annotations, you miss out on much of the power of Hibernate.
+====
+
+Since Hibernate existed before JPA, and since JPA was modelled on Hibernate, we unfortunately have some competition and duplication in naming between the standard and native APIs.
+For example:
+
+.Examples of competing APIs with similar naming
+|===
+| Hibernate | JPA

+| `org.hibernate.annotations.CascadeType` | `jakarta.persistence.CascadeType`
+| `org.hibernate.FlushMode` | `jakarta.persistence.FlushModeType`
+| `org.hibernate.annotations.FetchMode` | `jakarta.persistence.FetchType`
+| `org.hibernate.query.Query` | `jakarta.persistence.Query`
+| `org.hibernate.Cache` | `jakarta.persistence.Cache`
+| `@org.hibernate.annotations.NamedQuery` | `@jakarta.persistence.NamedQuery`
+| `@org.hibernate.annotations.Cache` | `@jakarta.persistence.Cacheable`
+|===
+
+Typically, the Hibernate-native APIs offer something a little extra that's missing in JPA, so this isn't exactly a _flaw_.
+But it's something to watch out for.
+ +[[java-code]] +=== Writing Java code with Hibernate + +If you're completely new to Hibernate and JPA, you might already be wondering how the persistence-related code is structured. + +Well, typically, our persistence-related code comes in two layers: + +. a representation of our data model in Java, which takes the form of a set of annotated entity classes, and +. a larger number of functions which interact with Hibernate's APIs to perform the persistence operations associated with your various transactions. + +The first part, the data or "domain" model, is usually easier to write, but doing a great and very clean job of it will strongly affect your success in the second part. + +Most people implement the domain model as a set of what we used to call "Plain Old Java Objects", that is, as simple Java classes with no direct dependencies on technical infrastructure, nor on application logic which deals with request processing, transaction management, communications, or interaction with the database. + +[TIP] +==== +Take your time with this code, and try to produce a Java model that's as close as reasonable to the relational data model. Avoid using exotic or advanced mapping features when they're not really needed. +When in the slightest doubt, map a foreign key relationship using `@ManyToOne` with `@OneToMany(mappedBy=...)` in preference to more complicated association mappings. +==== + +The second part of the code is much trickier to get right. This code must: + +- manage transactions and sessions, +- interact with the database via the Hibernate session, +- fetch and prepare data needed by the UI, and +- handle failures. + +[TIP] +==== +Responsibility for transaction and session management, and for recovery from certain kinds of failure, is best handled in some sort of framework code. +==== + +We're going to <> to the thorny question of how this persistence logic should be organized, and how it should fit into the rest of the system. 
+// First we want to make the ideas above concrete by seeing a simple example program that uses Hibernate in isolation. + +[[hello-hibernate]] +=== Hello, Hibernate + +Before we get deeper into the weeds, we'll quickly present a basic example program that will help you get started if you don't already have Hibernate integrated into your project. + +We begin with a simple gradle build file: + +[[build-gradle]] +[source,groovy] +.`build.gradle` +---- +plugins { + id 'java' +} + +group = 'org.example' +version = '1.0-SNAPSHOT' + +repositories { + mavenCentral() +} + +dependencies { + // the GOAT ORM + implementation 'org.hibernate.orm:hibernate-core:6.2.2.Final' + + // Hibernate Validator + implementation 'org.hibernate.validator:hibernate-validator:8.0.0.Final' + implementation 'org.glassfish:jakarta.el:4.0.2' + + // Agroal connection pool + implementation 'org.hibernate.orm:hibernate-agroal:6.2.2.Final' + implementation 'io.agroal:agroal-pool:2.1' + + // logging via Log4j + implementation 'org.apache.logging.log4j:log4j-core:2.20.0' + + // JPA Metamodel Generator + annotationProcessor 'org.hibernate.orm:hibernate-jpamodelgen:6.2.2.Final' + + // Compile-time checking for HQL + //implementation 'org.hibernate:query-validator:2.0-SNAPSHOT' + //annotationProcessor 'org.hibernate:query-validator:2.0-SNAPSHOT' + + // H2 database + runtimeOnly 'com.h2database:h2:2.1.214' +} +---- + +Only the first of these dependencies is absolutely _required_ to run Hibernate. + +Next, we'll add a logging configuration file for log4j: + +[source,properties] +.`log4j2.properties` +---- +rootLogger.level = info +rootLogger.appenderRefs = console +rootLogger.appenderRef.console.ref = console + +logger.hibernate.name = org.hibernate.SQL +logger.hibernate.level = info + +appender.console.name = console +appender.console.type = Console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %highlight{[%p]} %m%n +---- + +Now we need some Java code. 
+We begin with our _entity class_: + +[[book]] +[source,java] +.`Book.java` +---- +package org.hibernate.example; + +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.validation.constraints.NotNull; + +@Entity +class Book { + @Id + String isbn; + + @NotNull + String title; + + Book() {} + + Book(String isbn, String title) { + this.isbn = isbn; + this.title = title; + } +} +---- + +Finally, let's see code which configures and instantiates Hibernate and asks it to persist and query the entity. +Don't worry if this makes no sense at all right now. +It's the job of this Introduction to make all this crystal clear. + +[[main-hibernate]] +[source,java] +.`Main.java` +---- +package org.hibernate.example; + +import org.hibernate.cfg.Configuration; + +import static java.lang.Boolean.TRUE; +import static java.lang.System.out; +import static org.hibernate.cfg.AvailableSettings.*; + +public class Main { + public static void main(String[] args) { + var sessionFactory = new Configuration() + .addAnnotatedClass(Book.class) + // use H2 in-memory database + .setProperty(URL, "jdbc:h2:mem:db1") + .setProperty(USER, "sa") + .setProperty(PASS, "") + // use Agroal connection pool + .setProperty("hibernate.agroal.maxSize", "20") + // display SQL in console + .setProperty(SHOW_SQL, TRUE.toString()) + .setProperty(FORMAT_SQL, TRUE.toString()) + .setProperty(HIGHLIGHT_SQL, TRUE.toString()) + .buildSessionFactory(); + + // export the inferred database schema + sessionFactory.getSchemaManager().exportMappedObjects(true); + + // persist an entity + sessionFactory.inTransaction(session -> { + session.persist(new Book("9781932394153", "Hibernate in Action")); + }); + + // query data using HQL + sessionFactory.inSession(session -> { + out.println(session.createSelectionQuery("select isbn||': '||title from Book").getSingleResult()); + }); + + // query data using criteria API + sessionFactory.inSession(session -> { + var builder = 
sessionFactory.getCriteriaBuilder();
+            var query = builder.createQuery(String.class);
+            var book = query.from(Book.class);
+            query.select(builder.concat(builder.concat(book.get(Book_.isbn), builder.literal(": ")),
+                    book.get(Book_.title)));
+            out.println(session.createSelectionQuery(query).getSingleResult());
+        });
+    }
+}
+----
+
+Here we've used Hibernate's native APIs.
+We could have used JPA-standard APIs to achieve the same thing.
+
+[[hello-jpa]]
+=== Hello, JPA
+
+If we limit ourselves to the use of JPA-standard APIs, we need to use XML to configure Hibernate.
+
+[source,xml]
+.`META-INF/persistence.xml`
+----
+<persistence xmlns="https://jakarta.ee/xml/ns/persistence"
+             version="3.0">
+
+    <persistence-unit name="example">
+
+        <class>org.hibernate.example.Book</class>
+
+        <properties>
+            <!-- H2 in-memory database -->
+            <property name="jakarta.persistence.jdbc.url"
+                      value="jdbc:h2:mem:db1"/>
+
+            <!-- credentials -->
+            <property name="jakarta.persistence.jdbc.user"
+                      value="sa"/>
+            <property name="jakarta.persistence.jdbc.password"
+                      value=""/>
+
+            <!-- Agroal connection pool -->
+            <property name="hibernate.agroal.maxSize"
+                      value="20"/>
+
+            <!-- display SQL in console -->
+            <property name="hibernate.show_sql" value="true"/>
+            <property name="hibernate.format_sql" value="true"/>
+            <property name="hibernate.highlight_sql" value="true"/>
+        </properties>
+
+    </persistence-unit>
+
+</persistence>
+----
+
+Note that our `build.gradle` and `log4j2.properties` files are unchanged.
+
+Our entity class is also unchanged from what we had before.
+
+Unfortunately, JPA doesn't offer an `inSession()` method, so we'll have to implement session and transaction management ourselves.
+We can put that logic in our own `inSession()` function, so that we don't have to repeat it for every transaction.
+Again, you don't need to understand any of this code right now.
+
+[[main-jpa]]
+[source,java]
+.`Main.java` (JPA version)
+----
+package org.hibernate.example;
+
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.EntityManagerFactory;
+
+import java.util.Map;
+import java.util.function.Consumer;
+
+import static jakarta.persistence.Persistence.createEntityManagerFactory;
+import static java.lang.System.out;
+import static org.hibernate.cfg.AvailableSettings.JAKARTA_HBM2DDL_DATABASE_ACTION;
+import static org.hibernate.tool.schema.Action.CREATE;
+
+public class Main {
+    public static void main(String[] args) {
+        var factory = createEntityManagerFactory("example",
+                // export the inferred database schema
+                Map.of(JAKARTA_HBM2DDL_DATABASE_ACTION, CREATE));
+
+        // persist an entity
+        inSession(factory, entityManager -> {
+            entityManager.persist(new Book("9781932394153", "Hibernate in Action"));
+        });
+
+        // query data using HQL
+        inSession(factory, entityManager -> {
+            out.println(entityManager.createQuery("select isbn||': '||title from Book").getSingleResult());
+        });
+
+        // query data using criteria API
+        inSession(factory, entityManager -> {
+            var builder = factory.getCriteriaBuilder();
+            var query = builder.createQuery(String.class);
+            var book = query.from(Book.class);
+            query.select(builder.concat(builder.concat(book.get(Book_.isbn), builder.literal(": ")),
+                    book.get(Book_.title)));
+            out.println(entityManager.createQuery(query).getSingleResult());
+        });
+    }
+
+    // do some work in a session, performing correct transaction management
+    static void inSession(EntityManagerFactory factory, Consumer<EntityManager> work) {
+        var entityManager = factory.createEntityManager();
+        var transaction = entityManager.getTransaction();
+        try {
+            transaction.begin();
+            work.accept(entityManager);
+            transaction.commit();
+        }
+        catch (Exception e) {
+            if (transaction.isActive()) transaction.rollback();
+            throw e;
+        }
+        finally {
+            entityManager.close();
+        }
+    }
+}
+----
+
+In practice, we never access the database directly from a
`main()` method.
+So now let's talk about how to organize persistence logic in a real system.
+The rest of this chapter is not compulsory.
+If you're itching for more details about Hibernate itself, you're quite welcome to skip straight to the next chapter, and come back later.
+
+[[organizing-persistence]]
+=== Organizing persistence logic
+
+In a real program, persistence logic like the code shown above is usually interleaved with other sorts of code, including logic:
+
+- implementing the rules of the business domain, or
+- for interacting with the user.
+
+Therefore, many developers quickly—even _too quickly_, in our opinion—reach for ways to isolate the persistence logic into some sort of separate architectural layer.
+We're going to ask you to suppress this urge for now.
+
+[TIP]
+====
+The _easiest_ way to use Hibernate is to call the `Session` or `EntityManager` directly.
+If you're new to Hibernate, frameworks which wrap JPA are only going to make your life more difficult.
+====
+
+We prefer a _bottom-up_ approach to organizing our code.
+We like to start thinking about methods and functions, not about architectural layers and container-managed objects.
+To illustrate the sort of approach to code organization that we advocate, let's consider a service which queries the database using HQL or SQL.
+
+We might start with something like this, a mix of UI and persistence logic:
+
+[source,java]
+----
+@Path("/") @Produces("application/json")
+public class BookResource {
+    @GET @Path("book/{isbn}")
+    public Book getBook(String isbn) {
+        var book = sessionFactory.fromTransaction(session -> session.find(Book.class, isbn));
+        return book == null ? Response.status(404).build() : book;
+    }
+}
+----
+Indeed, we might also _finish_ with something like that—it's quite hard to identify anything concretely wrong with the code above, and for such a simple case it seems really difficult to justify making this code more complicated by introducing additional objects.
+
+One very nice aspect of this code, which we wish to draw your attention to, is that session and transaction management is handled by generic "framework" code, just as we already recommended above.
+In this case, we're using the `fromTransaction()` method, which happens to come built in to Hibernate.
+But you might prefer to use something else, for example:
+
+- in a container environment like Jakarta EE or Quarkus, _container-managed transactions_ and _container-managed persistence contexts_, or
+- something you write yourself.
+
+The important thing is that calls like `createEntityManager()` and `getTransaction().begin()` don't belong in regular program logic, because it's tricky and tedious to get the error handling correct.
+
+Let's now consider a slightly more complicated case.
+
+[source,java]
+----
+@Path("/") @Produces("application/json")
+public class BookResource {
+    private static final int RESULTS_PER_PAGE = 20;
+
+    @GET @Path("books/{titlePattern}/{page:\\d+}")
+    public List<Book> findBooks(String titlePattern, int page) {
+        var books = sessionFactory.fromTransaction(session -> {
+            return session.createQuery("from Book where title like ?1 order by title", Book.class)
+                    .setParameter(1, titlePattern)
+                    .setMaxResults(RESULTS_PER_PAGE) // return at most 20 results
+                    .setFirstResult(page*RESULTS_PER_PAGE) // start from the given page of results
+                    .getResultList();
+        });
+        return books.isEmpty() ? Response.status(404).build() : books;
+    }
+
+}
+----
+
+This is fine, and we won't complain if you prefer to leave the code exactly as it appears above.
+But there's one thing we could perhaps improve.
+We love super-short methods with single responsibilities, and there looks to be an opportunity to introduce one here.
+Let's hit the code with our favorite thing, the Extract Method refactoring.
We obtain:
+
+[source,java]
+----
+static List<Book> findBooksByTitleWithPagination(EntityManager entityManager,
+        String titlePattern, int max, int start) {
+    return entityManager.createQuery("from Book where title like ?1 order by title", Book.class)
+            .setParameter(1, titlePattern)
+            .setMaxResults(max)
+            .setFirstResult(start)
+            .getResultList();
+}
+----
+
+This is an example of a _query method_, a function which accepts arguments to the parameters of a HQL or SQL query, and executes the query, returning its results to the caller.
+And that's all it does; it doesn't orchestrate additional program logic, and it doesn't perform transaction or session management.
+
+It's even better to specify the query string using the `@NamedQuery` annotation, so that Hibernate can validate the query at startup time, that is, when the `SessionFactory` is created, instead of when the query is first executed.
+
+We need a place to put the annotation, so let's move our query method to a new class:
+
+[source,java]
+----
+@NamedQuery(name="findBooksByTitle",
+        query="from Book where title like :title order by title")
+class Queries {
+
+    static List<Book> findBooksByTitleWithPagination(EntityManager entityManager,
+            String titlePattern, int max, int start) {
+        return entityManager.createNamedQuery("findBooksByTitle", Book.class)
+                .setParameter("title", titlePattern)
+                .setMaxResults(max)
+                .setFirstResult(start)
+                .getResultList();
+    }
+}
+----
+
+Notice that our query method doesn't attempt to hide the `EntityManager` from its clients.
+Indeed, the client code is responsible for providing the `EntityManager` or `Session` to the query method.
+This is a quite distinctive feature of our whole approach.
+
+The client code may:
+
+- obtain an `EntityManager` or `Session` by calling `inTransaction()` or `fromTransaction()`, as we saw above, or,
+- in an environment with container-managed transactions, it might obtain it via dependency injection.
+
+Whatever the case, the code which orchestrates a unit of work usually just calls the `Session` or `EntityManager` directly, passing it along to helper methods like our query method if necessary.
+
+[source,java]
+----
+@GET
+@Path("books/{titlePattern}/{page:\\d+}")
+public List<Book> findBooks(String titlePattern, int page) {
+    var books = sessionFactory.fromTransaction(session ->
+            Queries.findBooksByTitleWithPagination(session, titlePattern,
+                    RESULTS_PER_PAGE, RESULTS_PER_PAGE*page));
+    return books.isEmpty() ? Response.status(404).build() : books;
+}
+----
+
+You might be thinking that our query method looks a bit boilerplatey.
+That's true, perhaps, but we're much more concerned that it's not very typesafe.
+Indeed, for many years, the lack of compile-time checking for HQL queries and code which binds arguments to query parameters was our number one source of discomfort with Hibernate.
+
+Fortunately, there's now a solution to both problems: as an incubating feature of Hibernate 6.3, we now offer the possibility to have the Metamodel Generator fill in the implementation of such query methods for you.
+
+Now that we have a rough picture of what our persistence logic might look like, it's natural to ask how we should test this code.
+
+[[testing]]
+=== Testing persistence logic
+
+:h2: http://www.h2database.com
+
+When we write tests for our persistence logic, we're going to need:
+
+1. a database, with
+2. an instance of the schema mapped by our persistent entities, and
+3. a set of test data, in a well-defined state at the beginning of each test.
+
+It might seem obvious that we should test against the same database system that we're going to use in production, and, indeed, we should certainly have at least _some_ tests for this configuration.
+But on the other hand, tests which perform I/O are much slower than tests which don't, and most databases can't be set up to run in-process.
+
+So, since most persistence logic written using Hibernate 6 is _extremely_ portable between databases, it often makes good sense to test against an in-memory Java database.
+({h2}[H2] is the one we recommend.)
+
+[CAUTION]
+====
+We do need to be careful here if our persistence code uses native SQL, or if it uses concurrency-management features like pessimistic locks.
+====
+
+Whether we're testing against our real database, or against an in-memory Java database, we'll need to export the schema at the beginning of a test suite.
+We _usually_ do this when we create the Hibernate `SessionFactory` or JPA `EntityManager`, and so traditionally we've used a configuration property for this.
+
+The JPA-standard property is `jakarta.persistence.schema-generation.database.action`.
+For example, if we're using `Configuration` to configure Hibernate, we could write:
+
+[source,java]
+----
+configuration.setProperty(AvailableSettings.JAKARTA_HBM2DDL_DATABASE_ACTION,
+        Action.SPEC_ACTION_DROP_AND_CREATE);
+----
+
+Alternatively, in Hibernate 6, we may use the new `SchemaManager` API to export the schema, just as we did <<main-hibernate,above>>.
+
+[source,java]
+----
+sessionFactory.getSchemaManager().exportMappedObjects(true);
+----
+
+Since executing DDL statements is very slow on many databases, we don't want to do this before every test.
+Instead, to ensure that each test begins with the test data in a well-defined state, we need to do two things before each test:
+
+1. clean up any mess left behind by the previous test, and then
+2. reinitialize the test data.
+
+We may truncate all the tables, leaving an empty database schema, using the `SchemaManager`.
+
+[source,java]
+----
+sessionFactory.getSchemaManager().truncateMappedObjects();
+----
+
+After truncating tables, we might need to initialize our test data.
+
+We may specify test data in a SQL script, for example:
+
+[source,sql]
+./import.sql
+----
+insert into Books (isbn, title) values ('9781932394153', 'Hibernate in Action')
+insert into Books (isbn, title) values ('9781932394887', 'Java Persistence with Hibernate')
+insert into Books (isbn, title) values ('9781617290459', 'Java Persistence with Hibernate, Second Edition')
+----
+
+If we name this file `import.sql`, and place it in the root classpath, that's all we need to do.
+
+Otherwise, we need to specify the file in the configuration property `jakarta.persistence.schema-generation.create-script-source`.
+
+This SQL script will be executed every time `exportMappedObjects()` or `truncateMappedObjects()` is called.
+
+[TIP]
+=====
+Another important test we'll need is one which validates our <<object-relational-mapping,O/R mapping>> against the actual database schema.
+This is again the job of the schema management tooling, either:
+[source,java]
+----
+configuration.setProperty(AvailableSettings.JAKARTA_HBM2DDL_DATABASE_ACTION,
+        Action.ACTION_VALIDATE);
+----
+Or:
+[source,java]
+----
+sessionFactory.getSchemaManager().validateMappedObjects();
+----
+This "test" is one which many people like to run even in production, when the system starts up.
+=====
+
+[[architecture]]
+=== Architecture and the persistence layer
+
+Let's now consider a different approach to code organization, one we treat with suspicion.
+
+[WARNING]
+====
+In this section, we're going to give you our _opinion_.
+If you're only interested in facts, or if you prefer not to read things that might undermine the opinion you currently hold, please feel free to skip straight to the <<overview,next section>>.
+====
+
+Hibernate is an architecture-agnostic library, not a framework, and therefore integrates comfortably with a wide range of Java frameworks and containers.
+Consistent with our place within the ecosystem, we've historically avoided giving out much advice on architecture.
+
+This is a practice we're now perhaps inclined to regret, since the resulting vacuum has come to be filled with advice from people advocating architectures, design patterns, and extra frameworks which we suspect make Hibernate a bit less pleasant to use than it should be.
+
+In particular, frameworks which wrap JPA seem to add bloat while subtracting some of the fine-grained control over data access that Hibernate works so hard to provide.
+These frameworks don't expose the full feature set of Hibernate, and so the program is forced to work with a less powerful abstraction.
+
+The stodgy, dogmatic, _conventional_ wisdom, which we hesitate to challenge for simple fear of pricking ourselves on the erect hackles that inevitably accompany such dogma-baiting is:
+
+> Code which interacts with the database belongs in a separate _persistence layer_.
+
+We lack the courage—perhaps even the conviction—to tell you categorically to _not_ follow this recommendation.
+But we do ask you to consider the cost in boilerplate of any architectural layer, and whether the benefits this cost buys are really worth it in the context of your system.
+
+To add a little background texture to this discussion, and at the risk of our Introduction degenerating into a rant at such an early stage, we're going to ask you to humor us while we talk a little more about ancient history.
+
+[%unbreakable]
+.An epic tale of DAOs and Repositories
+****
+Back in the dark days of Java EE 4, before the standardization of Hibernate, and subsequent ascendance of JPA in Java enterprise development, it was common to hand-code the messy JDBC interactions that Hibernate takes care of today.
+In those terrible times, a pattern arose that we used to call _Data Access Objects_ (DAOs).
+A DAO gave you a place to put all that nasty JDBC code, leaving the important program logic cleaner.
+
+When Hibernate arrived suddenly on the scene in 2001, developers loved it.
+But Hibernate implemented no specification, and many wished to reduce or at least _localize_ the dependence of their project logic on Hibernate. +An obvious solution was to keep the DAOs around, but to replace the JDBC code inside them with calls to the Hibernate `Session`. + +We partly blame ourselves for what happened next. + +Back in 2002 and 2003 this really seemed like a pretty reasonable thing to do. +In fact, we contributed to the popularity of this approach by recommending—or at least not discouraging—the use of DAOs in _Hibernate in Action_. +We hereby apologize for this mistake, and for taking much too long to recognize it. + +Eventually, some folks came to believe that their DAOs shielded their program from depending in a hard way on ORM, allowing them to "swap out" Hibernate, and replace it with JDBC, or with something else. +In fact, this was never really true—there's quite a deep difference between the programming model of JDBC, where every interaction with the database is explicit and synchronous, and the programming model of stateful sessions in Hibernate, where updates are implicit, and SQL statements are executed asynchronously. + +But then the whole landscape for persistence in Java changed in April 2006, when the final draft of JPA 1.0 was approved. +Java now had a standard way to do ORM, with multiple high-quality implementations of the standard API. +This was the end of the line for the DAOs, right? + +Well, no. +It wasn't. +DAOs were rebranded "repositories", and continue to enjoy a sort-of zombie afterlife as a front-end to JPA. +But are they really pulling their weight, or are they just unnecessary extra complexity and bloat? An extra layer of indirection that makes stack traces harder to read and code harder to debug? + +Our considered view is that they're mostly just bloat. 
+The JPA `EntityManager` is a "repository", and it's a standard repository with a well-defined specification written by people who spend all day thinking about persistence. +If these repository frameworks offered anything actually _useful_—and not obviously foot-shooty—over and above what `EntityManager` provides, we would have already added it to `EntityManager` decades ago. +**** + +Ultimately, we're not sure you need a separate persistence layer at all. +At least _consider_ the possibility that it might be OK to call the `EntityManager` directly from your business logic. + +image::images/architecture.png[API overview,pdfwidth="100%",width=1100,align="center"] + +We can already hear you hissing at our heresy. +But before slamming shut the lid of your laptop and heading off to fetch garlic and a pitchfork, take a couple of hours to weigh what we're proposing. + +OK, so, look, if it makes you feel better, one way to view `EntityManager` is to think of it as a single _generic_ "repository" that works for every entity in your system. +From this point of view, JPA _is_ your persistence layer. +And there's few good reasons to wrap this abstraction in a second abstraction that's _less_ generic. + +// We might even analogize `EntityManager` to `List`. +// Then DAO-style repositories would be like having separate `StringList`, `IntList`, `PersonList`, and `BookList` classes. +// They're a parallel class hierarchy that makes the data model harder to evolve over time. + +// Of course, such decisions are highly context-dependent: surely _some_ programs out there really do benefit from isolating the persistence logic into some sort of distinct layer; on the other hand, we're equally sure that there are others which simply _don't_. 
+ +Even where a distinct persistence layer _is_ appropriate, DAO-style repositories aren't the unambiguously most-correct way to factorize the equation: + +- most nontrivial queries touch multiple entities, and so it's often quite ambiguous which repository such a query belongs to, +- most queries are extremely specific to a particular fragment of program logic, and aren't reused in different places across the system, and +- the various operations of a repository rarely interact or share common internal implementation details. + +Indeed, repositories, by nature, exhibit very low _cohesion_. +A layer of repository objects might make sense if you have multiple implementations of each repository, but in practice almost nobody ever does. +That's because they're also extremely highly _coupled_ to their clients, with a very large API surface. +And, on the contrary, a layer is only easily replaceable if it has a very _narrow_ API. + +[%unbreakable] +[TIP] +==== +Some people do indeed use mock repositories for testing, but we really struggle to see any value in this. +If you don't want to run your tests against our real database, it's usually very easy to "mock" the database itself by running tests against an in-memory Java database like H2. +This works even better in Hibernate 6 than in older versions of Hibernate, since HQL is now _much_ more portable between platforms. +==== + +// So even in cases where separation _is_ of benefit, we go on to question the notion that this must be achieved via a layer of container-managed objects. + +// That said, one thing we _do_ understand is the desire to package: +// +// - a HQL or SQL query string with +// - the code which binds its parameters +// +// as a typesafe function. +// DAO-style repositories seem to provide a very natural place to hang such functions, and we suspect that this accounts for at least some of their continued popularity. +// You're probably wondering how _we_ would go about defining such functions. 
+ +// One thing that some repository frameworks offer is the ability to declare an abstract method that queries the database, and have the framework fill in an implementation of the method. +// But the way this works is that you must encode your query into the name of the method itself. +// +// Which, at least in principle, for a not-very-complicated query, leads to a method name like this: +// +// [.text-center] +// `findFirst10ByOrderDistinctPeopleByLastnameOrFirstnameAsc` +// +// This is a much worse query language than HQL. +// I think you can see why we didn't implement this idea in Hibernate. +// +_Phew_, let's move on. + +[[overview]] +=== Overview + +It's now time to begin our journey toward actually _understanding_ the code we saw earlier. + +This introduction will guide you through the basic tasks involved in developing a program that uses Hibernate for persistence: + +1. configuring and bootstrapping Hibernate, and obtaining an instance of `SessionFactory` or `EntityManagerFactory`, +2. writing a _domain model_, that is, a set of _entity classes_ which represent the persistent types in your program, and which map to tables of your database, +3. customizing these mappings when the model maps to a pre-existing relational schema, +4. using the `Session` or `EntityManager` to perform operations which query the database and return entity instances, or which update the data held in the database, +5. writing complex queries using the Hibernate Query Language (HQL) or native SQL, and, finally +6. tuning performance of the data access logic. + +Naturally, we'll start at the top of this list, with the least-interesting topic: _configuration_. 
diff --git a/documentation/src/main/asciidoc/introduction/Mapping.adoc b/documentation/src/main/asciidoc/introduction/Mapping.adoc new file mode 100644 index 000000000000..e448b8e8ee8b --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Mapping.adoc @@ -0,0 +1,837 @@ +[[object-relational-mapping]] +== Object/relational mapping + +Given a domain model—that is, a collection of entity classes decorated with all the fancy annotations we <> in the previous chapter—Hibernate will happily go away and infer a complete relational schema, and even <> if you ask politely. + +The resulting schema will be entirely sane and reasonable, though if you look closely, you'll find some flaws. +For example, every `VARCHAR` column will have the same length, `VARCHAR(255)`. + +But the process I just described—which we call _top down_ mapping—simply doesn't fit the most common scenario for the use of O/R mapping. +It's only rarely that the Java classes precede the relational schema. +Usually, _we already have a relational schema_, and we're constructing our domain model around the schema. +This is called _bottom up_ mapping. + +[TIP] +// ."Legacy" data +==== +Developers often refer to a pre-existing relational database as "legacy" data. +This tends to conjure images of bad old "legacy apps" written in COBOL or something. +But legacy data is valuable, and learning to work with it is important. +==== + +Especially when mapping bottom up, we often need to customize the inferred object/relational mappings. +This is a somewhat tedious topic, and so we don't want to spend too many words on it. +Instead, we'll quickly skim the most important mapping annotations. + +[[case-convention]] +.Hibernate SQL case convention +**** +Computers have had lowercase letters for rather a long time now. +Most developers learned long ago that text written in MixedCase, camelCase, or even snake_case is easier to read than text written in SHOUTYCASE. 
+This is just as true of SQL as it is of any other language. + +Therefore, for over twenty years, the convention on the Hibernate project has been that: + +- query language identifiers are written in `lowercase`, +- table names are written in `MixedCase`, and +- column names are written in `camelCase`. + +That is to say, we simply adopted Java's excellent conventions and applied them to SQL. + +Now, there's no way we can force you to follow this convention, even if we wished to. +Hell, you can easily write a `PhysicalNamingStrategy` which makes table and column names ALL UGLY AND SHOUTY LIKE THIS IF YOU PREFER. +But, _by default_, it's the convention Hibernate follows, and it's frankly a pretty reasonable one. +**** + +[[mapping-inheritance]] +=== Mapping entity inheritance hierarchies + +In <> we saw that entity classes may exist within an inheritance hierarchy. +There's three basic strategies for mapping an entity hierarchy to relational tables. +Let's put them in a table, so we can more easily compare the points of difference between them. + +.Entity inheritance mapping strategies +|=== +| Strategy | Mapping | Polymorphic queries | Constraints | Normalization | When to use it + +| `SINGLE_TABLE` +| Map every class in the hierarchy to the same table, and uses the value of a _discriminator column_ to determine which concrete class each row represents. +| To retrieve instances of a given class, we only need to query the one table. +| Attributes declared by subclasses map to columns without `NOT NULL` constraints. 💀 + + Any association may have a `FOREIGN KEY` constraint. 🤓 +| Subclass data is denormalized. 🧐 +| Works well when subclasses declare few or no additional attributes. +| `JOINED` +| Map every class in the hierarchy to a separate table, but each table only maps the attributes declared by the class itself. + + Optionally, a discriminator column may be used. 
+a| To retrieve instances of a given class, we must `JOIN` the table mapped by the class with: + + - all tables mapped by its superclasses and + - all tables mapped by its subclasses. +| Any attribute may map to a column with a `NOT NULL` constraint. 🤓 + + Any association may have a `FOREIGN KEY` constraint. 🤓 +| The tables are normalized. 🤓 +| The best option when we care a lot about constraints and normalization. +| `TABLE_PER_CLASS` +| Map every concrete class in the hierarchy to a separate table, but denormalize all inherited attributes into the table. +| To retrieve instances of a given class, we must take a `UNION` over the table mapped by the class and the tables mapped by its subclasses. +| Associations targeting a superclass cannot have a corresponding `FOREIGN KEY` constraint in the database. 💀💀 + + Any attribute may map to a column with a `NOT NULL` constraint. 🤓 +| Superclass data is denormalized. 🧐 +| Not very popular. + + From a certain point of view, competes with `@MappedSuperclass`. +|=== + +The three mapping strategies are enumerated by `InheritanceType`. +We specify an inheritance mapping strategy using the `@Inheritance` annotation. + +For mappings with a _discriminator column_, we should: + +- specify the discriminator column name and type by annotating the root entity `@DiscriminatorColumn`, and +- specify the values of this discriminator by annotating each entity in the hierarchy `@DiscriminatorValue`. + +// [[single-table-inheritance]] +// === Single table inheritance + +For single table inheritance we always need a discriminator: + +[source,java] +---- +@Entity +@DiscriminatorColumn(discriminatorType=CHAR, name="kind") +@DiscriminatorValue('P') +class Person { ... } + +@Entity +@DiscriminatorValue('A') +class Author { ... } +---- + +We don't need to explicitly specify `@Inheritance(strategy=SINGLE_TABLE)`, since that's the default. 
+ +// [[multiple-table-inheritance]] +// === Multiple table inheritance + +For `JOINED` inheritance we don't need a discriminator: + +[source,java] +---- +@Entity +@Inheritance(strategy=JOINED) +class Person { ... } + +@Entity +class Author { ... } +---- + +[TIP] +// .Discriminator columns for `JOINED` inheritance +==== +However, we can add a discriminator column if we like, and in that case the generated SQL for polymorphic queries will be slightly simpler. +==== + +Similarly, for `TABLE_PER_CLASS` inheritance we have: + +[source,java] +---- +@Entity +@Inheritance(strategy=TABLE_PER_CLASS) +class Person { ... } + +@Entity +class Author { ... } +---- + +[NOTE] +// .Discriminator columns for `TABLE_PER_CLASS` inheritance +==== +Hibernate doesn't allow discriminator columns for `TABLE_PER_CLASS` inheritance mappings, since they would make no sense, and offer no advantage. +==== + +Notice that in this last case, a polymorphic association like: + +[source,java] +---- +@ManyToOne Person person; +---- + +is a bad idea, since it's impossible to create a foreign key constraint that targets both mapped tables. + +// [[mixing-inheritance]] +// === Mixed inheritance +// +// Hibernate doesn't support mixing ``InheritanceType``s within a single entity hierarchy. +// However, it's possible to emulate a mix of `SINGLE_TABLE` and `JOINED` inheritance using the `@SecondaryTable` annotation. 
+ +[[table-mappings]] +=== Mapping to tables + +The following annotations specify exactly how elements of the domain model map to tables of the relational model: + +.Annotations for mapping tables +[%autowidth.stretch] +|=== +| Annotation | Purpose + +| `@Table` | Map an entity class to its primary table +| `@SecondaryTable` | Define a secondary table for an entity class +| `@JoinTable` | Map a many-to-many or many-to-one association to its association table +| `@CollectionTable` | Map an `@ElementCollection` to its table +|=== + +The first two annotations are used to map an entity to its _primary table_ and, optionally, one or more _secondary tables_. + +[[entity-table-mappings]] +=== Mapping entities to tables + +By default, an entity maps to a single table, which may be specified using `@Table`: + +[source,java] +---- +@Entity +@Table(name="People") +class Person { ... } +---- + +However, the `@SecondaryTable` annotation allows us to spread its attributes across multiple _secondary tables_. + +[source,java] +---- +@Entity +@Table(name="Books") +@SecondaryTable(name="Editions") +class Book { ... } +---- + +The `@Table` annotation can do more than just specify a name: + +.`@Table` annotation members +[%breakable,cols="20,~"] +|=== +| Annotation member | Purpose + +| `name` | The name of the mapped table +| `schema` 💀 | The schema to which the table belongs +| `catalog` 💀 | The catalog to which the table belongs +| `uniqueConstraints` | One or more `@UniqueConstraint` annotations declaring multi-column unique constraints +| `indexes` | One or more `@Index` annotations each declaring an index +|=== + +[%unbreakable] +[TIP] +// .If you don't need to, don't hardcode the schema and catalog +==== +It only makes sense to explicitly specify the `schema` in annotations if the domain model is spread across multiple schemas. + +Otherwise, it's a bad idea to hardcode the schema (or catalog) in a `@Table` annotation. 
+Instead: + +- set the configuration property `hibernate.default_schema` (or `hibernate.default_catalog`), or +- simply specify the schema in the JDBC connection URL. +==== + +The `@SecondaryTable` annotation is even more interesting: + +.`@SecondaryTable` annotation members +[%breakable,cols="20,~"] +|=== +| Annotation member | Purpose + +| `name` | The name of the mapped table +| `schema` 💀 | The schema to which the table belongs +| `catalog` 💀 | The catalog to which the table belongs +| `uniqueConstraints` | One or more `@UniqueConstraint` annotations declaring multi-column unique constraints +| `indexes` | One or more `@Index` annotations each declaring an index +| `pkJoinColumns` | One or more `@PrimaryKeyJoinColumn` annotations, specifying <> +| `foreignKey` | An `@ForeignKey` annotation specifying the name of the `FOREIGN KEY` constraint on the ``@PrimaryKeyJoinColumn``s +|=== + +[TIP] +==== +Using `@SecondaryTable` on a subclass in a `SINGLE_TABLE` entity inheritance hierarchy gives us a sort of mix of `SINGLE_TABLE` with `JOINED` inheritance. +==== + +[[join-table-mappings]] +=== Mapping associations to tables + +The `@JoinTable` annotation specifies an _association table_, that is, a table holding foreign keys of both associated entities. +This annotation is usually used with `@ManyToMany` associations: + +[source,java] +---- +@Entity +class Book { + ... + + @ManyToMany + @JoinTable(name="BooksAuthors") + Set authors; + + ... +} +---- + +But it's even possible to use it to map a `@ManyToOne` or `@OneToOne` association to an association table. + +[source,java] +---- +@Entity +class Book { + ... + + @ManyToOne(fetch=LAZY) + @JoinTable(name="BookPublisher") + Publisher publisher; + + ... +} +---- + +Here, there should be a `UNIQUE` constraint on one of the columns of the association table. + +[source,java] +---- +@Entity +class Author { + ... + + @OneToOne(optional=false, fetch=LAZY) + @JoinTable(name="AuthorPerson") + Person author; + + ... 
+} +---- + +Here, there should be a `UNIQUE` constraint on _both_ columns of the association table. + +.`@JoinTable` annotation members +[%breakable,cols="20,~"] +|=== +| Annotation member | Purpose + +| `name` | The name of the mapped association table +| `schema` 💀 | The schema to which the table belongs +| `catalog` 💀 | The catalog to which the table belongs +| `uniqueConstraints` | One or more `@UniqueConstraint` annotations declaring multi-column unique constraints +| `indexes` | One or more `@Index` annotations each declaring an index +| `joinColumns` | One or more `@JoinColumn` annotations, specifying <> to the table of the owning side +| `inverseJoinColumns` | One or more `@JoinColumn` annotations, specifying <> to the table of the unowned side +| `foreignKey` | An `@ForeignKey` annotation specifying the name of the `FOREIGN KEY` constraint on the ``joinColumns``s +| `inverseForeignKey` | An `@ForeignKey` annotation specifying the name of the `FOREIGN KEY` constraint on the ``inverseJoinColumns``s +|=== + +To better understand these annotations, we must first discuss column mappings in general. + +[[column-mappings]] +=== Mapping to columns + +These annotations specify how elements of the domain model map to columns of tables in the relational model: + +.Annotations for mapping columns +[%autowidth.stretch] +|=== +| Annotation | Purpose + +| `@Column` | Map an attribute to a column +| `@JoinColumn` | Map an association to a foreign key column +| `@PrimaryKeyJoinColumn` | Map the primary key used to join a secondary table with its primary, or a subclass table in `JOINED` inheritance with its root class table +| `@OrderColumn` | Specifies a column that should be used to maintain the order of a `List`. +| `@MapKeyColumn` | Specifies a column that should be used to persist the keys of a `Map`. +|=== + +We use the `@Column` annotation to map basic attributes. 
+ +[[regular-column-mappings]] +=== Mapping basic attributes to columns + +The `@Column` annotation is not only useful for specifying the column name. + +.`@Column` annotation members +[%breakable,cols="20,~"] +|=== +| Annotation member | Purpose + +| `name` | The name of the mapped column +| `table` | The name of the table to which this column belongs +| `length` | The length of a `VARCHAR`, `CHAR`, or `VARBINARY` column type +| `precision` | The decimal digits of precision of a `FLOAT`, `DECIMAL`, `NUMERIC`, `TIME`, or `TIMESTAMP` column type +| `scale` | The scale of a `DECIMAL` or `NUMERIC` column type, the digits of precision that occur to the right of the decimal point +| `unique` | Whether the column has a `UNIQUE` constraint +| `nullable` | Whether the column has a `NOT NULL` constraint +| `insertable` | Whether the column should appear in generated SQL `INSERT` statements +| `updatable` | Whether the column should appear in generated SQL `UPDATE` statements +| `columnDefinition` 💀| A DDL fragment that should be used to declare the column +|=== + +[TIP] +// .Use of `columnDefinition` results in unportable DDL +==== +We no longer recommend the use of `columnDefinition` since it results in unportable DDL. +Hibernate has much better ways to customize the generated DDL using techniques that result in portable behavior across different databases. 
+==== + +Here we see four different ways to use the `@Column` annotation: + +[source,java] +---- +@Entity +@Table(name="Books") +@SecondaryTable(name="Editions") +class Book { + @Id @GeneratedValue + @Column(name="bookId") // customize column name + Long id; + + @Column(length=100, nullable=false) // declare column as VARCHAR(100) NOT NULL + String title; + + @Column(length=17, unique=true, nullable=false) // declare column as VARCHAR(17) NOT NULL UNIQUE + String isbn; + + @Column(table="Editions", updatable=false) // column belongs to the secondary table, and is never updated + int edition; +} +---- + +We don't use `@Column` to map associations. + +[[join-column-mappings]] +=== Mapping associations to foreign key columns + +The `@JoinColumn` annotation is used to customize a foreign key column. + +.`@JoinColumn` annotation members +[%breakable,cols="20,~"] +|=== +| Annotation member | Purpose + +| `name` | The name of the mapped foreign key column +| `table` | The name of the table to which this column belongs +| `referencedColumnName` | The name of the column to which the mapped foreign key column refers +| `unique` | Whether the column has a `UNIQUE` constraint +| `nullable` | Whether the column has a `NOT NULL` constraint +| `insertable` | Whether the column should appear in generated SQL `INSERT` statements +| `updatable` | Whether the column should appear in generated SQL `UPDATE` statements +| `columnDefinition` 💀| A DDL fragment that should be used to declare the column +| `foreignKey` | A `@ForeignKey` annotation specifying the name of the `FOREIGN KEY` constraint +|=== + +A foreign key column doesn't necessarily have to refer to the primary key of the referenced table. +It's quite acceptable for the foreign key to refer to any other unique key of the referenced entity, even to a unique key of a secondary table. 
+ +Here we see how to use `@JoinColumn` to define a `@ManyToOne` association mapping a foreign key column which refers to the `@NaturalId` of `Book`: + +[source,java] +---- +@Entity +@Table(name="Items") +class Item { + ... + + @ManyToOne(optional=false) // implies nullable=false + @JoinColumn(name = "bookIsbn", referencedColumnName = "isbn", // a reference to a non-PK column + foreignKey = @ForeignKey(name="ItemsToBooksBySsn")) // supply a name for the FK constraint + Book book; + + ... +} +---- + +In case this is confusing: + +- `bookIsbn` is the name of the foreign key column in the `Items` table, +- it refers to a unique key `isbn` in the `Books` table, and +- it has a foreign key constraint named `ItemsToBooksBySsn`. + +Note that the `foreignKey` member is completely optional and only affects DDL generation. + +[TIP] +// .Foreign key constraint names +==== +If you don't supply an explicit name using `@ForeignKey`, Hibernate will generate a quite ugly name. +The reason for this is that the maximum length of foreign key names on some databases is extremely constrained, and we need to avoid collisions. +To be fair, this is perfectly fine if you're only using the generated DDL for testing. +==== + +For composite foreign keys we might have multiple `@JoinColumn` annotations: + +[source,java] +---- +@Entity +@Table(name="Items") +class Item { + ... + + @ManyToOne(optional=false) + @JoinColumn(name = "bookIsbn", referencedColumnName = "isbn") + @JoinColumn(name = "bookPrinting", referencedColumnName = "printing") + Book book; + + ... +} +---- + +If we need to specify the `@ForeignKey`, this starts to get a bit messy: + +[source,java] +---- +@Entity +@Table(name="Items") +class Item { + ... + + @ManyToOne(optional=false) + @JoinColumns(value = {@JoinColumn(name = "bookIsbn", referencedColumnName = "isbn"), + @JoinColumn(name = "bookPrinting", referencedColumnName = "printing")}, + foreignKey = @ForeignKey(name="ItemsToBooksBySsn")) + Book book; + + ... 
+} +---- + +For associations mapped to a `@JoinTable`, fetching the association requires two joins, and so we must declare the ``@JoinColumn``s inside the `@JoinTable` annotation: + +[source,java] +---- +@Entity +class Book { + @Id @GeneratedValue + Long id; + + @ManyToMany + @JoinTable(joinColumns=@JoinColumn(name="bookId"), + inverseJoinColumns=@JoinColumn(name="authorId"), + foreignKey=@ForeignKey(name="BooksToAuthors")) + Set authors; + + ... +} +---- + +Again, the `foreignKey` member is optional. + +[[primary-key-column-mappings]] +=== Mapping primary key joins between tables + +The `@PrimaryKeyJoinColumn` is a special-purpose annotation for mapping: + +- the primary key column of a `@SecondaryTable`—which is also a foreign key referencing the primary table, or +- the primary key column of the primary table mapped by a subclass in a `JOINED` inheritance hierarchy—which is also a foreign key referencing the primary table mapped by the root entity. + +.`@PrimaryKeyJoinColumn` annotation members +[%breakable,cols="20,~"] +|=== +| Annotation member | Purpose + +| `name` | The name of the mapped foreign key column +| `referencedColumnName` | The name of the column to which the mapped foreign key column refers +| `columnDefinition` 💀| A DDL fragment that should be used to declare the column +| `foreignKey` | A `@ForeignKey` annotation specifying the name of the `FOREIGN KEY` constraint +|=== + +When mapping a subclass table primary key, we place the `@PrimaryKeyJoinColumn` annotation on the entity class: + +[source,java] +---- +@Entity +@Table(name="People") +@Inheritance(strategy=JOINED) +class Person { ... } + +@Entity +@Table(name="Authors") +@PrimaryKeyJoinColumn(name="personId") // the primary key of the Authors table +class Author { ... 
} +---- + +But to map a secondary table primary key, the `@PrimaryKeyJoinColumn` annotation must occur inside the `@SecondaryTable` annotation: + +[source,java] +---- +@Entity +@Table(name="Books") +@SecondaryTable(name="Editions", + pkJoinColumns = @PrimaryKeyJoinColumn(name="bookId")) // the primary key of the Editions table +class Book { + @Id @GeneratedValue + @Column(name="bookId") // the name of the primary key of the Books table + Long id; + + ... +} +---- + +[[column-lengths]] +=== Column lengths and adaptive column types + +Hibernate automatically adjusts the column type used in generated DDL based on the column length specified by the `@Column` annotation. +So we don't usually need to explicitly specify that a column should be of type `TEXT` or `CLOB`—or worry about the parade of `TINYTEXT`, `MEDIUMTEXT`, `TEXT`, `LONGTEXT` types on MySQL—because Hibernate will automatically select one of those types if required to accommodate a string of the `length` we specify. + +The constant values defined in the class `org.hibernate.Length` are very helpful here: + +.Predefined column lengths +[%breakable,cols="10,12,~"] +|=== +| Constant | Value | Description + +| `DEFAULT` | 255 | The default length of a `VARCHAR` or `VARBINARY` column when none is explicitly specified +| `LONG` | 32600 | The largest column length for a `VARCHAR` or `VARBINARY` that is allowed on every database Hibernate supports +| `LONG16` | 32767 | The maximum length that can be represented using 16 bits (but this length is too large for a `VARCHAR` or `VARBINARY` column for some databases) +| `LONG32` | 2147483647 | The maximum length for a Java string +|=== + +We can use these constants in the `@Column` annotation: + +[source,java] +---- +@Column(length=LONG) +String text; + +@Column(length=LONG32) +byte[] binaryData; +---- + +This is usually all you need to do to make use of large object types in Hibernate. 
+ +[[lobs]] +=== LOBs + +JPA provides a `@Lob` annotation which specifies that a field should be persisted as a `BLOB` or `CLOB`. + +.Semantics of the `@Lob` annotation +**** +What the spec actually says is that the field should be persisted + +> ...as a large object to a database-supported large object type. + +It's quite unclear what this means, and the spec goes on to say that + +> ...the treatment of the `Lob` annotation is provider-dependent... + +which doesn't help much. +**** + +Hibernate interprets this annotation in what we think is the most reasonable way. +In Hibernate, an attribute annotated `@Lob` will be written to JDBC using the `setClob()` or `setBlob()` method of `PreparedStatement`, and will be read from JDBC using the `getClob()` or `getBlob()` method of `ResultSet`. + +Now, the use of these JDBC methods is usually unnecessary! +JDBC drivers are perfectly capable of converting between `String` and `CLOB` or between `byte[]` and `BLOB`. +So unless you specifically need to use these JDBC LOB APIs, you _don't_ need the `@Lob` annotation. + +Instead, as we just saw in <>, all you need is to specify a large enough column `length` to accommodate the data you plan to write to that column. + +[%unbreakable] +[WARNING] +// .PostgreSQL `BYTEA` and `TEXT` +==== +Unfortunately, the driver for PostgreSQL doesn't allow `BYTEA` or `TEXT` columns to be read via the JDBC LOB APIs. + +This limitation of the Postgres driver has resulted in a whole cottage industry of bloggers and stackoverflow question-answerers recommending convoluted ways to hack the Hibernate `Dialect` for Postgres to allow an attribute annotated `@Lob` to be written using `setString()` and read using `getString()`. + +But simply removing the `@Lob` annotation has exactly the same effect. + +Conclusion: + +- on PostgreSQL, `@Lob` always means the `OID` type, +- `@Lob` should never be used to map columns of type `BYTEA` or `TEXT`, and +- please don't believe everything you read on stackoverflow. 
+==== + +Finally, as an alternative, Hibernate lets you declare an attribute of type `java.sql.Blob` or `java.sql.Clob`. + +[source,java] +---- +@Entity +class Book { + ... + Clob text; + Blob coverArt; + ... +} +---- + +The advantage is that a `java.sql.Clob` or `java.sql.Blob` can in principle index up to 2^63^ characters or bytes, much more data than you can fit in a Java `String` or `byte[]` array (or in your computer). + +To assign a value to these fields, we'll need to use a `LobHelper`. +We can get one from the `Session`: + +[source,java] +---- +LobHelper helper = session.getLobHelper(); +book.text = helper.createClob(text); +book.coverArt = helper.createBlob(image); +---- + +In principle, the `Blob` and `Clob` objects provide efficient ways to read or stream LOB data from the server. + +[source,java] +---- +Book book = session.find(Book.class, bookId); +String text = book.text.getSubString(1, textLength); +InputStream bytes = book.coverArt.getBinaryStream(); +---- + +Of course, the behavior here depends very much on the JDBC driver, and so we really can't promise that this is a sensible thing to do on your database. + +[[mapping-embeddables]] +=== Mapping embeddable types to UDTs or to JSON + +There's a couple of alternative ways to represent an embeddable type on the database side. + +[discrete] +==== Embeddables as UDTs + +First, a really nice option, at least in the case of Java record types, and for databases which support _user-defined types_ (UDTs), is to define a UDT which represents the record type. +Hibernate 6 makes this really easy. +Just annotate the record type, or the attribute which holds a reference to it, with the new `@Struct` annotation: + +[source,java] +---- +@Embeddable +@Struct(name="PersonName") +record Name(String firstName, String middleName, String lastName) {} +---- +[source,java] +---- +@Entity +class Person { + ... + Name name; + ... 
+} +---- + +This results in the following UDT: + +[source,sql] +---- +create type PersonName as (firstName varchar(255), middleName varchar(255), lastName varchar(255)) +---- + +And the `name` column of the `Person` table will have the type `PersonName`. + +[discrete] +==== Embeddables to JSON + +A second option that's available is to map the embeddable type to a `JSON` (or `JSONB`) column. +Now, this isn't something we would exactly _recommend_ if you're defining a data model from scratch, but it's at least useful for mapping pre-existing tables with JSON-typed columns. +Since embeddable types are nestable, we can map some JSON formats this way, and even query JSON properties using HQL. + +[NOTE] +==== +At this time, JSON arrays are not supported! +==== + +To map an attribute of embeddable type to JSON, we must annotate the attribute `@JdbcTypeCode(SqlTypes.JSON)`, instead of annotating the embeddable type. +But the embeddable type `Name` should still be annotated `@Embeddable` if we want to query its attributes using HQL. + +[source,java] +---- +@Embeddable +record Name(String firstName, String middleName, String lastName) {} +---- +[source,java] +---- +@Entity +class Person { + ... + @JdbcTypeCode(SqlTypes.JSON) + Name name; + ... +} +---- + +We also need to add Jackson or an implementation of JSONB—for example, Yasson—to our runtime classpath. +To use Jackson we could add this line to our Gradle build: + +[source,groovy] +---- +runtimeOnly 'com.fasterxml.jackson.core:jackson-databind:{jacksonVersion}' +---- + +Now the `name` column of the `Person` table will have the type `jsonb`, and Hibernate will automatically use Jackson to serialize a `Name` to and from JSON format. + +[[miscellaneous-mappings]] +=== Summary of SQL column type mappings + +So, as we've seen, there are quite a few annotations that affect the mapping of Java types to SQL column types in DDL. 
+Here we summarize the ones we've just seen in the second half of this chapter, along with some we already mentioned in earlier chapters. + +.Annotations for mapping SQL column types +[%autowidth.stretch] +|=== +| Annotation | Interpretation + +| `@Enumerated` | Specify how an `enum` type should be persisted +| `@Nationalized` | Use a nationalized character type: `NCHAR`, `NVARCHAR`, or `NCLOB` +| `@Lob` 💀 | Use JDBC LOB APIs to read and write the annotated attribute +| `@Array` | Map a collection to a SQL `ARRAY` type of the specified length +| `@Struct` | Map an embeddable to a SQL UDT with the given name +| `@TimeZoneStorage` | Specify how the time zone information should be persisted +| `@JdbcType` or `@JdbcTypeCode` | Use an implementation of `JdbcType` to map an arbitrary SQL type +|=== + +In addition, there are some configuration properties which have a _global_ effect on how basic types map to SQL column types: + +.Type mapping settings +[%autowidth.stretch] +|=== +| Configuration property name | Purpose + +| `hibernate.use_nationalized_character_data` | Enable use of nationalized character types by default +| `hibernate.type.preferred_boolean_jdbc_type` | Specify the default SQL column type for mapping `boolean` +| `hibernate.type.preferred_uuid_jdbc_type` | Specify the default SQL column type for mapping `UUID` +| `hibernate.type.preferred_duration_jdbc_type` | Specify the default SQL column type for mapping `Duration` +| `hibernate.type.preferred_instant_jdbc_type` | Specify the default SQL column type for mapping `Instant` +| `hibernate.timezone.default_storage` | Specify the default strategy for storing time zone information +|=== + +[TIP] +==== +These are _global_ settings and thus quite clumsy. +We recommend against messing with any of these settings unless you have a really good reason for it. +==== + +There's one more topic we would like to cover in this chapter. 
+ +[[mapping-formulas]] +=== Mapping to formulas + +Hibernate lets us map an attribute of an entity to a SQL formula involving columns of the mapped table. +Thus, the attribute is a sort of "derived" value. + +.Annotations for mapping formulas +[%autowidth.stretch] +|=== +| Annotation | Purpose + +| `@Formula` | Map an attribute to a SQL formula +| `@JoinFormula` | Map an association to a SQL formula +| `@DiscriminatorFormula` | Use a SQL formula as the discriminator in <>. +|=== + +For example: + +[source,java] +---- +@Entity +class Order { + ... + @Column(name = "sub_total", scale=2, precision=8) + BigDecimal subTotal; + + @Column(name = "tax", scale=4, precision=4) + BigDecimal taxRate; + + @Formula("sub_total * (1.0 + tax)") + BigDecimal totalWithTax; + ... +} +---- \ No newline at end of file diff --git a/documentation/src/main/asciidoc/introduction/Preface.adoc b/documentation/src/main/asciidoc/introduction/Preface.adoc new file mode 100644 index 000000000000..716f6905de69 --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Preface.adoc @@ -0,0 +1,37 @@ +[[preface]] +== Preface + +Hibernate 6 is a major redesign of the world's most popular and feature-rich ORM solution. +The redesign has touched almost every subsystem of Hibernate, including the APIs, mapping annotations, and the query language. +This new Hibernate is more powerful, more robust, and more typesafe. + +With so many improvements, it's very difficult to summarize the significance of this work. +But the following general themes stand out. 
+Hibernate 6: + +- finally takes advantage of the advances in relational databases over the past decade, updating the query language to support a raft of new constructs in modern dialects of SQL, +- exhibits much more consistent behavior across different databases, greatly improving portability, and generates much higher-quality DDL from dialect-independent code, +- improves error reporting by more scrupulous validation of queries _before_ access to the database, +- improves the type-safety of O/R mapping annotations, clarifies the separation of API, SPI, and internal implementation, and fixes some long-standing architectural flaws, +- removes or deprecates legacy APIs, laying the foundation for future evolution, and +- makes far better use of Javadoc, putting much more information at the fingertips of developers. + +Hibernate 6 and Hibernate Reactive are now core components of Quarkus 3, the most exciting new environment for cloud-native development in Java, and Hibernate remains the persistence solution of choice for almost every major Java framework or server. + +Unfortunately, the changes in Hibernate 6 have obsoleted much of the information about Hibernate that's available in books, in blog posts, and on stackoverflow. + + +This guide is an up-to-date, high-level discussion of the current set of Hibernate features. It does not attempt +to cover every feature and should be used in conjunction with other documentation - + +- Hibernate's extensive link:{doc-javadoc-url}[Javadoc] +- The link:{doc-query-language-url}[Hibernate Query Language Guide] +- The Hibernate link:{doc-user-guide-url}[User Guide]. + +[NOTE] +==== +The Hibernate User Guide provides detailed discussion about most aspects of Hibernate. +But with so much information to cover, readability is difficult to achieve - it should be considered more of a reference guide. + +Where applicable, this guide will provide links to detailed User Guide content within its covered topics. 
+==== diff --git a/documentation/src/main/asciidoc/introduction/Tuning.adoc b/documentation/src/main/asciidoc/introduction/Tuning.adoc new file mode 100644 index 000000000000..8db509d1e6af --- /dev/null +++ b/documentation/src/main/asciidoc/introduction/Tuning.adoc @@ -0,0 +1,1018 @@ +[[tuning-and-performance]] +== Tuning and performance + +Once you have a program up and running using Hibernate to access +the database, it's inevitable that you'll find places where performance is +disappointing or unacceptable. + +Fortunately, most performance problems are relatively easy to solve with +the tools that Hibernate makes available to you, as long as you keep a +couple of simple principles in mind. + +First and most important: the reason you're using Hibernate is +that it makes things easier. If, for a certain problem, it's making +things _harder_, stop using it. Solve this problem with a different tool +instead. + +IMPORTANT: Just because you're using Hibernate in your program doesn't mean +you have to use it _everywhere_. + +Second: there are two main potential sources of performance bottlenecks in +a program that uses Hibernate: + +- too many round trips to the database, and +- memory consumption associated with the first-level (session) cache. + +So performance tuning primarily involves reducing the number of accesses +to the database, and/or controlling the size of the session cache. + +But before we get to those more advanced topics, we should start by tuning +the connection pool. + +[[connection-pool]] +=== Tuning the connection pool + +The connection pool built in to Hibernate is suitable for testing, but isn't intended for use in production. +Instead, Hibernate supports a range of different connection pools, including our favorite, Agroal. + +To select and configure Agroal, you'll need to set some extra configuration properties, in addition to the settings we already saw in <>. 
+Properties with the prefix `hibernate.agroal` are passed through to Agroal: + +[source,properties] +---- +# configure Agroal connection pool +hibernate.agroal.maxSize 20 +hibernate.agroal.minSize 10 +hibernate.agroal.acquisitionTimeout PT1s +hibernate.agroal.reapTimeout PT10s +---- + +As long as you set at least one property with the prefix `hibernate.agroal`, the `AgroalConnectionProvider` will be selected automatically. +There's many to choose from: + +.Settings for configuring Agroal +[%breakable,cols="37,~"] +|=== +| Configuration property name | Purpose + +| `hibernate.agroal.maxSize` | The maximum number of connections present on the pool +| `hibernate.agroal.minSize` | The minimum number of connections present on the pool +| `hibernate.agroal.initialSize` | The number of connections added to the pool when it is started +| `hibernate.agroal.maxLifetime` | The maximum amount of time a connection can live, after which it is removed from the pool +| `hibernate.agroal.acquisitionTimeout` | The maximum amount of time a thread can wait for a connection, after which an exception is thrown instead +| `hibernate.agroal.reapTimeout` | The duration for eviction of idle connections +| `hibernate.agroal.leakTimeout` | The duration of time a connection can be held without causing a leak to be reported +| `hibernate.agroal.idleValidationTimeout` | A foreground validation is executed if a connection has been idle on the pool for longer than this duration +| `hibernate.agroal.validationTimeout` | The interval between background validation checks +| `hibernate.agroal.initialSql` | A SQL command to be executed when a connection is created +|=== + +The following settings are common to all connection pools supported by Hibernate: + +.Common settings for connection pools +[%breakable,cols="37,~"] +|=== +| `hibernate.connection.autocommit` | The default autocommit mode +| `hibernate.connection.isolation` | The default transaction isolation level +|=== + +.Container-managed 
datasources +**** +In a container environment, you usually don't need to configure a connection pool through Hibernate. +Instead, you'll use a container-managed datasource, as we saw in <>. +**** + +[[statement-batching]] +=== Enabling statement batching + +An easy way to improve performance of some transactions, with almost no work at all, is to turn on automatic DML statement batching. +Batching only helps in cases where a program executes many inserts, updates, or deletes against the same table in a single transaction. + +All we need to do is set a single property: + +.Enabling JDBC batching +[%autowidth.stretch] +|=== +| Configuration property name | Purpose | Alternative + +| `hibernate.jdbc.batch_size` | Maximum batch size for SQL statement batching | `setJdbcBatchSize()` +|=== + +[TIP] +==== +Even better than DML statement batching is the use of HQL `update` or `delete` queries, or even native SQL that calls a stored procedure! +==== + +[[association-fetching]] +=== Association fetching + +:association-fetching: {doc-user-guide-url}#fetching + +Achieving high performance in ORM means minimizing the number of round trips to the database. This goal should be uppermost in your mind whenever you're writing data access code with Hibernate. The most fundamental rule of thumb in ORM is: + +- explicitly specify all the data you're going to need right at the start of a session/transaction, and fetch it immediately in one or two queries, +- and only then start navigating associations between persistent entities. + +image::images/fetching.png[Fetching process,width=700,align="center"] + +Without question, the most common cause of poorly-performing data access code in Java programs is the problem of _N+1 selects_. +Here, a list of _N_ rows is retrieved from the database in an initial query, and then associated instances of a related entity are fetched using _N_ subsequent queries. 
+
+[IMPORTANT]
+// .This problem is your responsibility
+====
+This isn't a bug or limitation of Hibernate; this problem even affects typical handwritten JDBC code behind DAOs.
+Only you, the developer, can solve this problem, because only you know ahead of time what data you're going to need in a given unit of work.
+But that's OK.
+Hibernate gives you all the tools you need.
+====
+
+In this section we're going to discuss different ways to avoid such "chatty" interaction with the database.
+
+Hibernate provides several strategies for efficiently fetching associations and avoiding _N+1_ selects:
+
+- _outer join fetching_—where an association is fetched using a `left outer join`,
+- _batch fetching_—where an association is fetched using a subsequent `select` with a batch of primary keys, and
+- _subselect fetching_—where an association is fetched using a subsequent `select` with keys re-queried in a subselect.
+
+Of these, you should almost always use outer join fetching.
+But let's consider the alternatives first.
+
+[[batch-subselect-fetch]]
+=== Batch fetching and subselect fetching
+
+Consider the following code:
+
+[source,java]
+----
+List<Book> books =
+    session.createSelectionQuery("from Book order by isbn", Book.class)
+        .getResultList();
+books.forEach(book -> book.getAuthors().forEach(author -> out.println(book.title + " by " + author.name)));
+----
+
+This code is _very_ inefficient, resulting, by default, in the execution of _N+1_ `select` statements, where _N_ is the number of ``Book``s.
+
+Let's see how we can improve on that.
+ +[discrete] +===== SQL for batch fetching + +With batch fetching enabled, Hibernate might execute the following SQL on PostgreSQL: + +[source,sql] +---- +/* initial query for Books */ +select b1_0.isbn,b1_0.price,b1_0.published,b1_0.publisher_id,b1_0.title +from Book b1_0 +order by b1_0.isbn + +/* first batch of associated Authors */ +select a1_0.books_isbn,a1_1.id,a1_1.bio,a1_1.name +from Book_Author a1_0 + join Author a1_1 on a1_1.id=a1_0.authors_id +where a1_0.books_isbn = any (?) + +/* second batch of associated Authors */ +select a1_0.books_isbn,a1_1.id,a1_1.bio,a1_1.name +from Book_Author a1_0 + join Author a1_1 on a1_1.id=a1_0.authors_id +where a1_0.books_isbn = any (?) +---- + +The first `select` statement queries and retrieves ``Book``s. +The second and third queries fetch the associated ``Author``s in batches. +The number of batches required depends on the configured _batch size_. +Here, two batches were required, so two SQL statements were executed. + +[NOTE] +==== +The SQL for batch fetching looks slightly different depending on the database. +Here, on PostgreSQL, Hibernate passes a batch of primary key values as a SQL `ARRAY`. +==== + +[discrete] +===== SQL for subselect fetching + +On the other hand, with subselect fetching, Hibernate would execute this SQL: + +[source,sql] +---- +/* initial query for Books */ +select b1_0.isbn,b1_0.price,b1_0.published,b1_0.publisher_id,b1_0.title +from Book b1_0 +order by b1_0.isbn + +/* fetch all associated Authors */ +select a1_0.books_isbn,a1_1.id,a1_1.bio,a1_1.name +from Book_Author a1_0 + join Author a1_1 on a1_1.id=a1_0.authors_id +where a1_0.books_isbn in (select b1_0.isbn from Book b1_0) +---- + +Notice that the first query is re-executed in a subselect in the second query. +The execution of the subselect is likely to be relatively inexpensive, since the data should already be cached by the database. +Clever, huh? 
+
+[discrete]
+===== Enabling the use of batch or subselect fetching
+
+Both batch fetching and subselect fetching are disabled by default, but we may enable one or the other globally using properties.
+
+.Configuration settings to enable batch and subselect fetching
+[%breakable,cols="32,~,28"]
+|===
+| Configuration property name | Property value | Alternatives

+| `hibernate.default_batch_fetch_size` | A sensible batch size `>1` to enable batch fetching | `@BatchSize()`, `setFetchBatchSize()`
+| `hibernate.use_subselect_fetch` | `true` to enable subselect fetching | `@Fetch(SUBSELECT)`, `setSubselectFetchingEnabled()`
+|===
+
+Alternatively, we can enable one or the other in a given session:
+
+[source,java]
+----
+session.setFetchBatchSize(5);
+session.setSubselectFetchingEnabled(true);
+----
+
+[%unbreakable]
+[TIP]
+====
+We may request subselect fetching more selectively by annotating a collection or many-valued association with the `@Fetch` annotation.
+[source,java]
+----
+@ManyToMany @Fetch(SUBSELECT)
+Set<Author> authors;
+----
+Note that `@Fetch(SUBSELECT)` has the same effect as `@Fetch(SELECT)`, except after execution of a HQL or criteria query.
+But after query execution, `@Fetch(SUBSELECT)` is able to much more efficiently fetch associations.
+
+Later, we'll see how we can use <> to do this even more selectively.
+====
+
+That's all there is to it.
+Too easy, right?
+
+Sadly, that's not the end of the story.
+While batch fetching might _mitigate_ problems involving N+1 selects, it won't solve them.
+The truly correct solution is to fetch associations using joins.
+Batch fetching (or subselect fetching) can only be the _best_ solution in rare cases where outer join fetching would result in a cartesian product and a huge result set.
+
+But batch fetching and subselect fetching have one important characteristic in common: they can be performed _lazily_.
+This is, in principle, pretty convenient.
+When we query data, and then navigate an object graph, lazy fetching saves us the effort of planning ahead.
+It turns out that this is a convenience we're going to have to surrender.
+ +[[join-fetch]] +=== Join fetching + +Outer join fetching is usually the best way to fetch associations, and it's what we use most of the time. +Unfortunately, by its very nature, join fetching simply can't be lazy. +So to make use of join fetching, we must plan ahead. +Our general advice is: + +TIP: Avoid the use of lazy fetching, which is often the source of N+1 selects. + +Now, we're not saying that associations should be mapped for eager fetching by default! +That would be a terrible idea, resulting in simple session operations that fetch almost the entire database. +Therefore: + +TIP: Most associations should be mapped for lazy fetching by default. + +It sounds as if this tip is in contradiction to the previous one, but it's not. +It's saying that you must explicitly specify eager fetching for associations precisely when and where they are needed. + +If we need eager join fetching in some particular transaction, we have four different ways to specify that. + +[cols="40,~"] +|=== +| Passing a JPA `EntityGraph` | We've already seen this in <> +| Specifying a named _fetch profile_ | We'll discuss this approach later in <> +| Using `left join fetch` in HQL/JPQL | See _A guide to Hibernate Query Language 6_ for details +| Using `From.fetch()` in a criteria query | Same semantics as `join fetch` in HQL +|=== + +Typically, a query is the most convenient option. 
+Here's how we can ask for join fetching in HQL:
+
+[source,java]
+----
+List<Book> booksWithJoinFetchedAuthors =
+    session.createSelectionQuery("from Book join fetch authors order by isbn", Book.class)
+        .getResultList();
+----
+
+And this is the same query, written using the criteria API:
+
+[source,java]
+----
+var builder = sessionFactory.getCriteriaBuilder();
+var query = builder.createQuery(Book.class);
+var book = query.from(Book.class);
+book.fetch(Book_.authors);
+query.select(book);
+query.orderBy(builder.asc(book.get(Book_.isbn)));
+List<Book> booksWithJoinFetchedAuthors =
+    session.createSelectionQuery(query).getResultList();
+----
+
+Either way, a single SQL `select` statement is executed:
+
+[source,sql]
+----
+select b1_0.isbn,a1_0.books_isbn,a1_1.id,a1_1.bio,a1_1.name,b1_0.price,b1_0.published,b1_0.publisher_id,b1_0.title
+from Book b1_0
+    join (Book_Author a1_0 join Author a1_1 on a1_1.id=a1_0.authors_id)
+        on b1_0.isbn=a1_0.books_isbn
+order by b1_0.isbn
+----
+
+Much better!
+
+Join fetching, despite its non-lazy nature, is clearly more efficient than either batch or subselect fetching, and this is the source of our recommendation to avoid the use of lazy fetching.
+
+[TIP]
+====
+There's one interesting case where join fetching becomes inefficient: when we fetch two many-valued associations _in parallel_.
+Imagine we wanted to fetch both `Author.books` and `Author.royaltyStatements` in some unit of work.
+Joining both collections in a single query would result in a cartesian product of tables, and a large SQL result set.
+Subselect fetching comes to the rescue here, allowing us to fetch `books` using a join, and `royaltyStatements` using a single subsequent `select`.
+====
+
+Of course, an alternative way to avoid many round trips to the database is to cache the data we need in the Java client.
+If we're expecting to find the associated data in a local cache, we probably don't need join fetching at all.
+ +[TIP] +==== +But what if we can't be _certain_ that all associated data will be in the cache? +In that case, we might be able to reduce the cost of cache misses by enabling batch fetching. +==== + +[[second-level-cache]] +=== The second-level cache + +:second-level-cache: {doc-user-guide-url}#caching + +A classic way to reduce the number of accesses to the database is to use a second-level cache, allowing data cached in memory to be shared between sessions. + +By nature, a second-level cache tends to undermine the ACID properties of transaction processing in a relational database. +We _don't_ use a distributed transaction with two-phase commit to ensure that changes to the cache and database happen atomically. +So a second-level cache is often by far the easiest way to improve the performance of a system, but only at the cost of making it much more difficult to reason about concurrency. +And so the cache is a potential source of bugs which are difficult to isolate and reproduce. + +Therefore, by default, an entity is not eligible for storage in the second-level cache. +We must explicitly mark each entity that will be stored in the second-level cache with the `@Cache` annotation from `org.hibernate.annotations`. + +But that's still not enough. +Hibernate does not itself contain an implementation of a second-level cache, so it's necessary to configure an external _cache provider_. + +[CAUTION] +// .Caching is disabled by default +==== +Caching is disabled by default. +To minimize the risk of data loss, we force you to stop and think before any entity goes into the cache. +==== + +Hibernate segments the second-level cache into named _regions_, one for each: + +- mapped entity hierarchy or +- collection role. + +For example, there might be separate cache regions for `Author`, `Book`, `Author.books`, and `Book.authors`. + +Each region is permitted its own policies for expiry, persistence, and replication. These policies must be configured externally to Hibernate. 
+
+The appropriate policies depend on the kind of data an entity represents. For example, a program might have different caching policies for "reference" data, for transactional data, and for data used for analytics. Ordinarily, the implementation of those policies is the responsibility of the underlying cache implementation.
+
+[[enable-second-level-cache]]
+=== Specifying which data is cached
+
+By default, no data is eligible for storage in the second-level cache.
+
+An entity hierarchy or collection role may be assigned a region using the `@Cache` annotation.
+If no region name is explicitly specified, the region name is just the name of the entity class or collection role.
+
+[source,java]
+----
+@Entity
+@Cache(usage=NONSTRICT_READ_WRITE, region="Publishers")
+class Publisher {
+    ...
+
+    @Cache(usage=READ_WRITE, region="PublishedBooks")
+    @OneToMany(mappedBy=Book_.PUBLISHER)
+    Set<Book> books;
+
+    ...
+}
+----
+
+The cache defined by a `@Cache` annotation is automatically utilized by Hibernate to:
+
+- retrieve an entity by id when `find()` is called, or
+- resolve an association by id.
+
+[WARNING]
+====
+The `@Cache` annotation must be specified on the _root class_ of an entity inheritance hierarchy.
+It's an error to place it on a subclass entity.
+====
+
+The `@Cache` annotation always specifies a `CacheConcurrencyStrategy`, a policy governing access to the second-level cache by concurrent transactions.
+
+.Cache concurrency
+[%breakable,cols="20,30,~"]
+|===
+| Concurrency policy | Interpretation | Explanation
+
+| `READ_ONLY` a|
+- Immutable data
+- Read-only access
+| Indicates that the cached object is immutable, and is never updated. If an entity with this cache concurrency is updated, an exception is thrown.
+
+This is the simplest, safest, and best-performing cache concurrency strategy. It's particularly suitable for so-called "reference" data.
+ +| `NONSTRICT_READ_WRITE` a| +- Concurrent updates are extremely improbable +- Read/write access with no locking +| Indicates that the cached object is sometimes updated, but that it's extremely unlikely that two transactions will attempt to update the same item of data at the same time. + +This strategy does not use locks. When an item is updated, the cache is invalidated both before and after completion of the updating transaction. But without locking, it's impossible to completely rule out the possibility of a second transaction storing or retrieving stale data in or from the cache during the completion process of the first transaction. + +| `READ_WRITE` a| +- Concurrent updates are possible but not common +- Read/write access using soft locks +a| Indicates a non-vanishing likelihood that two concurrent transactions attempt to update the same item of data simultaneously. + +This strategy uses "soft" locks to prevent concurrent transactions from retrieving or storing a stale item from or in the cache during the transaction completion process. A soft lock is simply a marker entry placed in the cache while the updating transaction completes. + +- A second transaction may not read the item from the cache while the soft lock is present, and instead simply proceeds to read the item directly from the database, exactly as if a regular cache miss had occurred. +- Similarly, the soft lock also prevents this second transaction from storing a stale item to the cache when it returns from its round trip to the database with something that might not quite be the latest version. + +| `TRANSACTIONAL` a| +- Concurrent updates are frequent +- Transactional access +| Indicates that concurrent writes are common, and the only way to maintain synchronization between the second-level cache and the database is via the use of a fully transactional cache provider. 
In this case, the cache and the database must cooperate via JTA or the XA protocol, and Hibernate itself takes on little responsibility for maintaining the integrity of the cache. +|=== + +Which policies make sense may also depend on the underlying second-level cache implementation. + +[%unbreakable] +[NOTE] +// .The JPA-defined `@Cacheable` annotation +==== +JPA has a similar annotation, named `@Cacheable`. +Unfortunately, it's almost useless to us, since: + +- it provides no way to specify any information about the nature of the cached entity and how its cache should be managed, and +- it may not be used to annotate associations, and so we can't even use it to mark collection roles as eligible for storage in the second-level cache. +==== + +[[natural-id-cache]] +=== Caching by natural id + +If our entity has a <>, we can enable an additional cache, which holds cross-references from natural id to primary id, by annotating the entity `@NaturalIdCache`. +By default, the natural id cache is stored in a dedicated region of the second-level cache, separate from the cached entity data. + +[source,java] +---- +@Entity +@Cache(usage=READ_WRITE, region="Book") +@NaturalIdCache(region="BookIsbn") +class Book { + ... + @NaturalId + String isbn; + + @NaturalId + int printing; + ... +} +---- + +This cache is utilized when the entity is retrieved using one of the operations of `Session` which performs <>. + +[NOTE] +==== +Since the natural id cache doesn't contain the actual state of the entity, it doesn't make sense to annotate an entity `@NaturalIdCache` unless it's already eligible for storage in the second-level cache, that is, unless it's also annotated `@Cache`. +==== + +It's worth noticing that, unlike the primary identifier of an entity, a natural id might be mutable. + +We must now consider a subtlety that often arises when we have to deal with so-called "reference data", that is, data which fits easily in memory, and doesn't change much. 
+ +[[caching-and-fetching]] +=== Caching and association fetching + +Let's consider again our `Publisher` class: + +[source,java] +---- +@Cache(usage=NONSTRICT_READ_WRITE, region="Publishers") +@Entity +class Publisher { ... } +---- + +Data about publishers doesn't change very often, and there aren't so many of them. +Suppose we've set everything up so that the publishers are almost _always_ available in the second-level cache. + +Then in this case we need to think carefully about associations of type `Publisher`. + +[source,java] +---- +@ManyToOne +Publisher publisher; +---- + +There's no need for this association to be lazily fetched, since we're expecting it to be available in memory, so we won't set it `fetch=LAZY`. +But on the other hand, if we leave it marked for eager fetching then, by default, Hibernate will often fetch it using a join. +This places completely unnecessary load on the database. + +The solution is the `@Fetch` annotation: + +[source,java] +---- +@ManyToOne @Fetch(SELECT) +Publisher publisher; +---- + +By annotating the association `@Fetch(SELECT)`, we suppress join fetching, giving Hibernate a chance to find the associated `Publisher` in the cache. + +Therefore, we arrive at this rule of thumb: + +[TIP] +==== +Many-to-one associations to "reference data", or to any other data that will almost always be available in the cache, should be mapped `EAGER`,`SELECT`. + +Other associations, as we've <>, should be `LAZY`. +==== + +Once we've marked an entity or collection as eligible for storage in the second-level cache, we still need to set up an actual cache. + +[[second-level-cache-configuration]] +=== Configuring the second-level cache provider + +Configuring a second-level cache provider is a rather involved topic, and quite outside the scope of this document. 
+But in case it helps, we often test Hibernate with the following configuration, which uses EHCache as the cache implementation, as above in <>:
+
+:ehcache-config: https://www.ehcache.org/documentation/
+
+.EHCache configuration
+[%breakable,cols="35,~"]
+|===
+| Configuration property name | Property value
+
+| `hibernate.cache.region.factory_class` | `jcache`
+| `hibernate.javax.cache.uri` | `/ehcache.xml`
+|===
+
+If you're using EHCache, you'll also need to include an `ehcache.xml` file
+that explicitly configures the behavior of each cache region belonging to
+your entities and collections.
+You'll find more information about configuring EHCache {ehcache-config}[here].
+
+:caffeine: https://github.com/ben-manes/caffeine/
+
+We may use any other implementation of JCache, such as {caffeine}[Caffeine].
+JCache automatically selects whichever implementation it finds on the classpath.
+If there are multiple implementations on the classpath, we must disambiguate using:
+
+.Disambiguating the JCache implementation
+[%breakable,cols="35,~"]
+|===
+| Configuration property name | Property value
+
+| `hibernate.javax.cache.provider` a| The implementation of `javax.cache.spi.CachingProvider`, for example:
+[%breakable,cols="~,20"]
+!===
+! `org.ehcache.jsr107.EhcacheCachingProvider` ! for EHCache
+! `com.github.benmanes.caffeine.jcache.spi.CaffeineCachingProvider` ! for Caffeine
+!===
+|===
+
+Alternatively, to use Infinispan as the cache implementation, the following settings are required:
+
+:infinispan-hibernate: https://infinispan.org/docs/stable/titles/hibernate/hibernate.html
+
+.Infinispan provider configuration
+[%breakable,cols="35,~"]
+|===
+| Configuration property name | Property value
+
+| `hibernate.cache.region.factory_class` | `infinispan`
+| `hibernate.cache.infinispan.cfg` a| Path to infinispan configuration file, for example:
+[%breakable,cols="~,35"]
+!===
+! `org/infinispan/hibernate/cache/commons/builder/infinispan-configs.xml`
+! for a distributed cache
+! `org/infinispan/hibernate/cache/commons/builder/infinispan-configs-local.xml`
+! to test with local cache
+!===
+|===
+
+Infinispan is usually used when distributed caching is required.
+There's more about using Infinispan with Hibernate {infinispan-hibernate}[here].
+
+Finally, there's a way to globally disable the second-level cache:
+
+.Setting to disable caching
+[%breakable,cols="35,~"]
+|===
+| Configuration property name | Property value
+
+| `hibernate.cache.use_second_level_cache` | `true` to enable caching, or `false` to disable it
+|===
+
+When `hibernate.cache.region.factory_class` is set, this property defaults to `true`.
+
+[%unbreakable]
+[TIP]
+====
+This setting lets us easily disable the second-level cache completely when troubleshooting or profiling performance.
+====
+
+You can find much more information about the second-level cache in the {second-level-cache}[User Guide].
+
+[[query-cache]]
+=== Caching query result sets
+
+The caches we've described above are only used to optimize lookups by id or by natural id.
+Hibernate also has a way to cache the result sets of queries, though this is only rarely an efficient thing to do.
+
+The query cache must be enabled explicitly:
+
+.Setting to enable the query cache
+[%breakable,cols="35,~"]
+|===
+| Configuration property name | Property value
+
+| `hibernate.cache.use_query_cache` | `true` to enable the query cache
+|===
+
+To cache the results of a query, call `SelectionQuery.setCacheable(true)`:
+
+[source,java]
+----
+session.createQuery("from Product where discontinued = false")
+    .setCacheable(true)
+    .getResultList();
+----
+
+By default, the query result set is stored in a cache region named `default-query-results-region`.
+Since different queries should have different caching policies, it's common to explicitly specify a region name: + +[source,java] +---- +session.createQuery("from Product where discontinued = false") + .setCacheable(true) + .setCacheRegion("ProductCatalog") + .getResultList(); +---- + +A result set is cached together with a _logical timestamp_. +By "logical", we mean that it doesn't actually increase linearly with time, and in particular it's not the system time. + +When a `Product` is updated, Hibernate _does not_ go through the query cache and invalidate every cached result set that's affected by the change. +Instead, there's a special region of the cache which holds a logical timestamp of the most-recent update to each table. +This is called the _update timestamps cache_, and it's kept in the region `default-update-timestamps-region`. + +[CAUTION] +==== +It's _your responsibility_ to ensure that this cache region is configured with appropriate policies. +In particular, update timestamps should never expire or be evicted. +==== + +When a query result set is read from the cache, Hibernate compares its timestamp with the timestamp of each of the tables that affect the results of the query, and _only_ returns the result set if the result set isn't stale. +If the result set _is_ stale, Hibernate goes ahead and re-executes the query against the database and updates the cached result set. + +As is generally the case with any second-level cache, the query cache can break the ACID properties of transactions. + +[[second-level-cache-management]] +=== Second-level cache management + +For the most part, the second-level cache is transparent. +Program logic which interacts with the Hibernate session is unaware of the cache, and is not impacted by changes to caching policies. 
+ +At worst, interaction with the cache may be controlled by specifying of an explicit `CacheMode`: + +[source,java] +---- +session.setCacheMode(CacheMode.IGNORE); +---- + +Or, using JPA-standard APIs: + +[source,java] +---- +entityManager.setCacheRetrieveMode(CacheRetrieveMode.BYPASS); +entityManager.setCacheStoreMode(CacheStoreMode.BYPASS); +---- + +The JPA-defined cache modes come in two flavors: `CacheRetrieveMode` and `CacheStoreMode`. + +.JPA-defined cache retrieval modes +[%breakable,cols="30,~"] +|=== +| Mode | Interpretation + +| `CacheRetrieveMode.USE` | Read data from the cache if available +| `CacheRetrieveMode.BYPASS` | Don't read data from the cache; go direct to the database +|=== + +We might select `CacheRetrieveMode.BYPASS` if we're concerned about the possibility of reading stale data from the cache. + +.JPA-defined cache storage modes +[%breakable,cols="30,~"] +|=== +| Mode | Interpretation + +| `CacheStoreMode.USE` | Write data to the cache when read from the database or when modified; do not update already-cached items when reading +| `CacheStoreMode.REFRESH` | Write data to the cache when read from the database or when modified; always update cached items when reading +| `CacheStoreMode.BYPASS` | Don't write data to the cache +|=== + +We should select `CacheStoreMode.BYPASS` if we're querying data that doesn't need to be cached. + +[%unbreakable] +[TIP] +// .A good time to `BYPASS` the cache +==== +It's a good idea to set the `CacheStoreMode` to `BYPASS` just before running a query which returns a large result set full of data that we don't expect to need again soon. +This saves work, and prevents the newly-read data from pushing out the previously cached data. 
+====
+
+In JPA we would use this idiom:
+
+[source,java]
+----
+entityManager.setCacheStoreMode(CacheStoreMode.BYPASS);
+List<Publisher> allpubs =
+    entityManager.createQuery("from Publisher", Publisher.class)
+        .getResultList();
+entityManager.setCacheStoreMode(CacheStoreMode.USE);
+----
+
+But Hibernate has a better way:
+
+[source,java]
+----
+List<Publisher> allpubs =
+    session.createSelectionQuery("from Publisher", Publisher.class)
+        .setCacheStoreMode(CacheStoreMode.BYPASS)
+        .getResultList();
+----
+
+A Hibernate `CacheMode` packages a `CacheRetrieveMode` with a `CacheStoreMode`.
+
+.Hibernate cache modes and JPA equivalents
+[%breakable,cols="30,~"]
+|===
+| Hibernate `CacheMode` | Equivalent JPA modes
+
+| `NORMAL` | `CacheRetrieveMode.USE`, `CacheStoreMode.USE`
+| `IGNORE` | `CacheRetrieveMode.BYPASS`, `CacheStoreMode.BYPASS`
+| `GET` | `CacheRetrieveMode.USE`, `CacheStoreMode.BYPASS`
+| `PUT` | `CacheRetrieveMode.BYPASS`, `CacheStoreMode.USE`
+| `REFRESH` | `CacheRetrieveMode.BYPASS`, `CacheStoreMode.REFRESH`
+|===
+
+There's no particular reason to prefer Hibernate's `CacheMode` over the JPA equivalents.
+This enumeration only exists because Hibernate had cache modes long before they were added to JPA.
+
+[%unbreakable]
+[TIP]
+====
+For "reference" data, that is, for data which is expected to always be found in the second-level cache, it's a good idea to _prime_ the cache at startup.
+There's a really easy way to do this: just execute a query immediately after obtaining the
+`EntityManager` or `SessionFactory`.
+
+[source,java]
+----
+SessionFactory sessionFactory =
+    setupHibernate(new Configuration())
+        .buildSessionFactory();
+// prime the second-level cache
+sessionFactory.inSession(session -> {
+    session.createSelectionQuery("from Countries")
+        .setReadOnly(true)
+        .getResultList();
+    session.createSelectionQuery("from Product where discontinued = false")
+        .setReadOnly(true)
+        .getResultList();
+});
+
+----
+====
+
+Very occasionally, it's necessary or advantageous to control the cache explicitly, for example, to evict some data that we know to be stale.
+The `Cache` interface allows programmatic eviction of cached items.
+
+[source,java]
+----
+sessionFactory.getCache().evictEntityData(Book.class, bookId);
+----
+
+[%unbreakable]
+[CAUTION]
+// .Second-level cache management is not transaction-aware
+====
+Second-level cache management via the `Cache` interface is not transaction-aware.
+None of the operations of `Cache` respect any isolation or transactional semantics associated with the underlying caches. In particular, eviction via the methods of this interface causes an immediate "hard" removal outside any current transaction and/or locking scheme.
+====
+
+Ordinarily, however, Hibernate automatically evicts or updates cached data after modifications, and, in addition, cached data which is unused will eventually be expired according to the configured policies.
+
+This is quite different to what happens with the first-level cache.
+
+[[session-cache-management]]
+=== Session cache management
+
+Entity instances aren't automatically evicted from the session cache when they're no longer needed.
+Instead, they stay pinned in memory until the session they belong to is discarded by your program.
+
+The methods `detach()` and `clear()` allow you to remove entities from the session cache, making them available for garbage collection.
+Since most sessions are rather short-lived, you won't need these operations very often.
+And if you find yourself thinking you _do_ need them in a certain situation, you should strongly consider an alternative solution: a _stateless session_.
+
+[[stateless-sessions]]
+=== Stateless sessions
+
+An arguably-underappreciated feature of Hibernate is the `StatelessSession` interface, which provides a command-oriented, more bare-metal approach to interacting with the database.
+
+You may obtain a stateless session from the `SessionFactory`:
+
+[source, JAVA, indent=0]
+----
+StatelessSession ss = getSessionFactory().openStatelessSession();
+----
+
+A stateless session:
+
+- doesn't have a first-level cache (persistence context), nor does it interact with any second-level caches, and
+- doesn't implement transactional write-behind or automatic dirty checking, so all operations are executed immediately when they're explicitly called.
+
+For a stateless session, we're always working with detached objects.
+Thus, the programming model is a bit different:
+
+.Important methods of the `StatelessSession`
+[%autowidth.stretch]
+|===
+| Method name and parameters | Effect
+
+| `get(Class, Object)` | Obtain a detached object, given its type and its id, by executing a `select`
+| `fetch(Object)` | Fetch an association of a detached object
+| `refresh(Object)` | Refresh the state of a detached object by executing
+a `select`
+| `insert(Object)` | Immediately `insert` the state of the given transient object into the database
+| `update(Object)` | Immediately `update` the state of the given detached object in the database
+| `delete(Object)` | Immediately `delete` the state of the given detached object from the database
+| `upsert(Object)` | Immediately `insert` or `update` the state of the given detached object using a SQL `merge into` statement
+|===
+
+NOTE: There's no `flush()` operation, and so `update()` is always explicit.
+ +In certain circumstances, this makes stateless sessions easier to work with, but with the caveat that a stateless session is much more vulnerable to data aliasing effects, since it's easy to get two non-identical Java objects which both represent the same row of a database table. + +[%unbreakable] +[CAUTION] +==== +If we use `fetch()` in a stateless session, we can very easily obtain two objects representing the same database row! +==== + +In particular, the absence of a persistence context means that we can safely perform bulk-processing tasks without allocating huge quantities of memory. +Use of a `StatelessSession` alleviates the need to call: + +- `clear()` or `detach()` to perform first-level cache management, and +- `setCacheMode()` to bypass interaction with the second-level cache. + +[%unbreakable] +[TIP] +==== +Stateless sessions can be useful, but for bulk operations on huge datasets, Hibernate can't possibly compete with stored procedures! +==== + +When using a stateless session, you should be aware of the following additional limitations: + +- persistence operations never cascade to associated instances, +- changes to `@ManyToMany` associations and ``@ElementCollection``s cannot be made persistent, and +- operations performed via a stateless session bypass callbacks. + +[[optimistic-and-pessimistic-locking]] +=== Optimistic and pessimistic locking + +Finally, an aspect of behavior under load that we didn't mention above is row-level data contention. +When many transactions try to read and update the same data, the program might become unresponsive with lock escalation, deadlocks, and lock acquisition timeout errors. + +There's two basic approaches to data concurrency in Hibernate: + +- optimistic locking using `@Version` columns, and +- database-level pessimistic locking using the SQL `for update` syntax (or equivalent). + +In the Hibernate community it's _much_ more common to use optimistic locking, and Hibernate makes that incredibly easy. 
+ +[%unbreakable] +[TIP] +==== +Where possible, in a multiuser system, avoid holding a pessimistic lock across a user interaction. +Indeed, the usual practice is to avoid having transactions that span user interactions. For multiuser systems, optimistic locking is king. +==== + +That said, there _is_ also a place for pessimistic locks, which can sometimes reduce the probability of transaction rollbacks. + +Therefore, the `find()`, `lock()`, and `refresh()` methods of the reactive session accept an optional `LockMode`. +We can also specify a `LockMode` for a query. +The lock mode can be used to request a pessimistic lock, or to customize the behavior of optimistic locking: + +.Optimistic and pessimistic lock modes +[%breakable,cols="26,~"] +|=== +| `LockMode` type | Meaning + +| `READ` | An optimistic lock obtained implicitly whenever +an entity is read from the database using `select` +| `OPTIMISTIC` | An optimistic lock obtained when an entity is +read from the database, and verified using a +`select` to check the version when the +transaction completes +| `OPTIMISTIC_FORCE_INCREMENT` | An optimistic lock obtained when an entity is +read from the database, and enforced using an +`update` to increment the version when the +transaction completes +| `WRITE` | A pessimistic lock obtained implicitly whenever +an entity is written to the database using +`update` or `insert` +| `PESSIMISTIC_READ` | A pessimistic `for share` lock +| `PESSIMISTIC_WRITE` | A pessimistic `for update` lock +| `PESSIMISTIC_FORCE_INCREMENT` | A pessimistic lock enforced using an immediate +`update` to increment the version +|=== + +[[statistics]] +=== Collecting statistics + +We may ask Hibernate to collect statistics about its activity by setting this configuration property: + +[%breakable,cols="35,~"] +|=== +| Configuration property name | Property value + +| `hibernate.generate_statistics` | `true` to enable collection of statistics +|=== + +The statistics are exposed by the `Statistics` 
object: + +[source,java] +---- +long failedVersionChecks = + sessionFactory.getStatistics() + .getOptimisticFailureCount(); + +long publisherCacheMissCount = + sessionFactory.getStatistics() + .getEntityStatistics(Publisher.class.getName()) + .getCacheMissCount() +---- + +:micrometer: https://quarkus.io/guides/micrometer +:smallrye-metrics: https://quarkus.io/guides/microprofile-metrics + +Hibernate's statistics enable observability. +Both {micrometer}[Micrometer] and {smallrye-metrics}[SmallRye Metrics] are capable of exposing these metrics. + +[[slow-queries]] +=== Tracking down slow queries + +When a poorly-performing SQL query is discovered in production, it can sometimes be hard to track down exactly where in the Java code the query originates. +Hibernate offers two configuration properties that can make it easier to identify a slow query and find its source. + +.Settings for tracking slow queries +[%breakable,cols="25,~,~"] +|=== +| Configuration property name | Purpose | Property value + +| `hibernate.log_slow_query` | Log slow queries at the `INFO` level | The minimum execution time, in milliseconds, which characterizes a "slow" query +| `hibernate.use_sql_comments` | Prepend comments to the executed SQL | `true` or `false` +|=== + +When `hibernate.use_sql_comments` is enabled, the text of the HQL query is prepended as a comment to the generated SQL, which usually makes it easy to find the HQL in the Java code. + +The comment text may be customized: + +- by calling `Query.setComment(comment)` or `Query.setHint(AvailableHints.HINT_COMMENT,comment)`, or +- via the `@NamedQuery` annotation. + +[[hibernate-reactive]] +=== Reactive programming with Hibernate + +:hr: https://hibernate.org/reactive/ +:hr-guide: https://hibernate.org/reactive/documentation/2.0/reference/html_single/ + +Finally, many systems which require high scalability now make use of reactive programming and reactive streams. 
+{hr}[Hibernate Reactive] brings O/R mapping to the world of reactive programming. +You can learn much more about Hibernate Reactive from its {hr-guide}[Reference Documentation]. + +[TIP] +==== +Hibernate Reactive may be used alongside vanilla Hibernate in the same program, and can reuse the same entity classes. +This means you can use the reactive programming model exactly where you need it—perhaps only in one or two places in your system. +You don't need to rewrite your whole program using reactive streams. +==== \ No newline at end of file diff --git a/documentation/src/main/asciidoc/introduction/images/api-overview.png b/documentation/src/main/asciidoc/introduction/images/api-overview.png new file mode 100644 index 000000000000..aee9e050eec7 Binary files /dev/null and b/documentation/src/main/asciidoc/introduction/images/api-overview.png differ diff --git a/documentation/src/main/asciidoc/introduction/images/architecture.png b/documentation/src/main/asciidoc/introduction/images/architecture.png new file mode 100644 index 000000000000..2b88ca763de4 Binary files /dev/null and b/documentation/src/main/asciidoc/introduction/images/architecture.png differ diff --git a/documentation/src/main/asciidoc/introduction/images/associations-big.png b/documentation/src/main/asciidoc/introduction/images/associations-big.png new file mode 100644 index 000000000000..e357c67738bc Binary files /dev/null and b/documentation/src/main/asciidoc/introduction/images/associations-big.png differ diff --git a/documentation/src/main/asciidoc/introduction/images/associations.png b/documentation/src/main/asciidoc/introduction/images/associations.png new file mode 100644 index 000000000000..27444d4132df Binary files /dev/null and b/documentation/src/main/asciidoc/introduction/images/associations.png differ diff --git a/documentation/src/main/asciidoc/introduction/images/entity-lifecyle.png b/documentation/src/main/asciidoc/introduction/images/entity-lifecyle.png new file mode 100644 index 
000000000000..180eac4aafa8 Binary files /dev/null and b/documentation/src/main/asciidoc/introduction/images/entity-lifecyle.png differ diff --git a/documentation/src/main/asciidoc/introduction/images/fetching.png b/documentation/src/main/asciidoc/introduction/images/fetching.png new file mode 100644 index 000000000000..cd9fadfebeb5 Binary files /dev/null and b/documentation/src/main/asciidoc/introduction/images/fetching.png differ diff --git a/documentation/src/main/asciidoc/querylanguage/Concepts.adoc b/documentation/src/main/asciidoc/querylanguage/Concepts.adoc new file mode 100644 index 000000000000..f69073d30926 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/Concepts.adoc @@ -0,0 +1,740 @@ +[[basic-concepts]] +== Basic concepts + +This document describes Hibernate Query Language (HQL), which is, I suppose we could say, a dialect of the Java (now Jakarta) Persistence Query Language (JPQL). + +Or is it the other way around? + +[NOTE] +==== +JPQL was inspired by early versions of HQL, and is a proper subset of modern HQL. +Here we focus on describing the complete, more powerful HQL language as it exists today. + +If strict JPA compliance is what you're looking for, use the setting `hibernate.jpa.compliance.query=true`. +With this configuration, any attempt to use HQL features beyond the JPQL subset will result in an exception. + +We don't recommend the use of this setting. +==== + +The truth is that HQL today has capabilities that go far beyond what is possible in plain JPQL. +We're not going to fuss too much about not limiting ourselves to the standard here. +Faced with a choice between writing database-specific native SQL, or database-independent HQL, we know what our preference is. + +[[and-sqk]] +=== HQL and SQL + +Throughout this document, we'll assume you know SQL and the relational model, at least at a basic level. +HQL and JPQL are loosely based on SQL and are easy to learn for anyone familiar with SQL. 
+
+For example, if you understand this SQL query:
+
+[source,sql]
+----
+select book.title, pub.name /* projection */
+from Book as book /* root table */
+    join Publisher as pub /* table join */
+        on book.publisherId = pub.id /* join condition */
+where book.title like 'Hibernate%' /* restriction (selection) */
+order by book.title /* sorting */
+----
+
+Then we bet you can already make sense of this HQL:
+
+[source,sql]
+----
+select book.title, pub.name /* projection */
+from Book as book /* root entity */
+    join book.publisher as pub /* association join */
+where book.title like 'Hibernate%' /* restriction (selection) */
+order by book.title /* sorting */
+----
+
+You might notice that even for this very simple example, the HQL version is slightly shorter.
+This is typical.
+Actually, HQL queries are usually much more compact than the SQL they compile to.
+
+[IMPORTANT]
+====
+But there's one huge difference: in HQL, `Book` refers to an entity class written in Java, and `book.title` to a field of that class.
+We're not permitted to directly reference database tables and columns in HQL or JPQL.
+====
+
+In this chapter, we'll demonstrate how similar HQL is to SQL by giving a quick overview of the basic statement types.
+You'll be bored to discover they're exactly the ones you expect: `select`, `insert`, `update`, and `delete`.
+
+[WARNING]
+====
+This is a reference guide.
+We're not going to explain basic concepts like ternary logic, joins, aggregation, selection, or projection, because that information is freely available elsewhere, and anyway we couldn't possibly do these topics justice here.
+If you don't have a firm grasp of these ideas, it's time to pick up a book about SQL or about the relational model.
+====
+
+But first we need to mention something that's a bit different to SQL.
+HQL has a slightly complicated way of dealing with case sensitivity.
+ +=== Lexical structure + +Lexically, JPQL is quite similar to SQL, so in this section we'll limit ourselves to mentioning those places where it differs. + +[[case-sensitivity]] +==== Identifiers and case sensitivity + +An identifier is a name used to refer to an entity, an attribute of a Java class, an <>, or a function. + +For example, `Book`, `title`, `author`, and `upper` are all identifiers, but they refer to different kinds of things. +In HQL and JPQL, the case sensitivity of an identifier depends on the kind of thing the identifier refers to. + +The rules for case sensitivity are: + +- keywords and function names are case-insensitive, but +- identification variable names, Java class names, and the names of attributes of Java classes, are case-sensitive. + +We apologize for this inconsistency. +In hindsight, it might have been better to define the whole language as case-sensitive. + +[%unbreakable] +[NOTE] +==== +Incidentally, it's standard practice to use lowercase keywords in HQL and JPQL. + +The use of uppercase keywords indicates an endearing but unhealthy attachment to the culture of the 1970's. +==== + +Just to reiterate these rules: + +[cols="45,~"] +|=== +| `select`, `SeLeCT`, `sELEct`, and `SELECT` | All the same, `select` is a keyword +| `upper(name)` and `UPPER(name)` | Same, `upper` is a function name +| `from BackPack` and `from Backpack` | Different, refer to different Java classes +| `person.nickName` and `person.nickname` | Different, since the path expression element `nickName` refers to an attribute of an entity defined in Java +| `person.nickName`, `Person.nickName`, and `PERSON.nickName` | All different, since the first element of a path expression is an <> +|=== + +[CAUTION] +==== +The JPQL specification defines identification variables as case-_insensitive_. +And so in strict JPA-compliant mode, Hibernate treats `person.nickName`, `Person.nickName`, and `PERSON.nickName` as the _same_. 
+==== + +A _quoted identifier_ is written in backticks. Quoting lets you use a keyword as an identifier. + +[source,hql] +---- +select thing.interval.`from` from Thing thing +---- + +Actually, in most contexts, HQL keywords are "soft", and don't need to be quoted. +The parser is usually able to distinguish if the reserved word is being used as a keyword or as an identifier. + +[[comments]] +==== Comments + +Comments in HQL look like multiline comments in Java. +They're delimited by `/\*` and `*/`. + +Neither SQL-style `--` nor Java-style `//` line-ending comments are allowed. + +It's quite rare to see comments in HQL, but perhaps it will be more common now that Java has text blocks. + +[[parameters]] +==== Parameters + +Parameters come in two flavors in JPQL, and HQL supports a third flavor for historical reasons: + +[cols="35,25,~"] +|=== +| Parameter type | Examples | Usage from Java + +| Named parameters | `:name`, `:title`, `:id` | `query.setParameter("name", name)` +| Ordinal parameters | `?1`, `?2`, `?3` | `query.setParameter(1, name)` +| JDBC-style parameters 💀 | `?` | `query.setParameter(1, name)` +|=== + +JDBC-style parameters of form `?` are like ordinal parameters where the index is inferred from the position in the text of the query. +JDBC-style parameters are deprecated. + +[%unbreakable] +[WARNING] +==== +It's _extremely_ important to use parameters to pass user input to the database. +Constructing a query by concatenating HQL fragments with user input is extremely dangerous, opening the door to the possibility of executing arbitrary code on the database server. +==== + +==== Literals + +Some of the syntax for literal values also departs from the standard syntax in ANSI SQL, especially in the area of date/time literals, but we'll discuss all that later, in <>. 
+
+
+[[type-system]]
+=== Type system
+
+JPA doesn't have a well-specified type system, but, reading between the lines a bit, the following types may be discerned:
+
+- entity types,
+- numeric values,
+- strings,
+- dates/times,
+- booleans, and
+- enumerated types.
+
+Such a coarse-grained type system is in some sense an insufficient constraint on implementors of the specification, or, viewed from a different perspective, it leaves us quite a lot of flexibility.
+
+The way HQL interprets this type system is to assign a Java type to every expression in the language.
+Thus, numeric expressions have types like `Long`, `Float`, or `BigInteger`, date/time expressions have types like `LocalDate`, `LocalDateTime`, or `Instant`, and boolean expressions are always of type `Boolean`.
+
+Going further, an expression like `local datetime - document.created` is assigned the Java type `java.time.Duration`, a type which doesn't appear anywhere in the JPA specification.
+
+Since the language must be executed on SQL databases, every type accommodates null values.
+
+[[null-values-and-ternary-logic]]
+==== Null values and ternary logic
+
+The SQL `null` behaves quite differently to a null value in Java.
+
+- In Java, an expression like `number + 1` produces an exception if `number` is null.
+- But in SQL, and therefore also in HQL and JPQL, such an expression evaluates to `null`.
+
+[IMPORTANT]
+====
+It's almost always the case that an operation applied to a null value yields another null value.
+This rule applies to function application, to operators like `*` and `||`, to comparison operators like `<` and `=`, and even to logical operations like `and` and `not`.
+
+The exceptions to this rule are the `is null` operator and the functions `coalesce()` and `ifnull()` which are specifically designed for <>.
+====
+
+This rule is the source of the famous (and controversial) _ternary logic_ of SQL.
+A logical expression like `firstName='Gavin' and team='Hibernate'` isn't restricted to the values `true` and `false`.
+It may also be `null`.
+
+This can in principle lead to some quite unintuitive results: we can't use the law of the excluded middle to reason about logical expressions in SQL!
+But in practice, we've never once run into a case where this caused us problems.
+
+As you probably know, when a logical predicate occurs as a <>, rows for which the predicate evaluates to `null` are _excluded_ from the result set.
+That is, in this context at least, a logical null is interpreted as "effectively false".
+
+[[statement-types]]
+=== Statement types
+
+HQL features four different kinds of statement:
+
+- `select` queries,
+- `update` statements,
+- `delete` statements, and
+- `insert ... values` and `insert ... select` statements.
+
+Collectively, `insert`, `update`, and `delete` statements are sometimes called _mutation queries_.
+We need to be a little bit careful when executing mutation queries via a stateful session.
+
+[%unbreakable]
+[IMPORTANT]
+====
+The effect of an `update` or `delete` statement is not reflected in the persistence context, nor in the state of entity objects held in memory at the time the statement is executed.
+
+It's the responsibility of the client program to maintain synchronization of state held in memory with the database after execution of an `update` or `delete` statement.
+====
+
+Let's consider each type of mutation query in turn, beginning with the most useful type.
+
+[[update]]
+==== Update statements
+
+The https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form[BNF] for an `update` statement is quite straightforward:
+
+[[update-bnf-example]]
+[source, antlrv4]
+----
+include::{extrasdir}/statement_update_bnf.txt[]
+----
+
+The `set` clause has a list of assignments to attributes of the given entity.
+ +For example: + +[[update-example]] +[source, hql] +---- +update Person set nickName = 'Nacho' where name = 'Ignacio' +---- + +Update statements are polymorphic, and affect mapped subclasses of the given entity class. +Therefore, a single HQL `update` statement might result in multiple SQL update statements executed against the database. + +An `update` statement must be executed using `Query.executeUpdate()`. + +[[update-examples]] +[source, java] +---- +// JPA API +int updatedEntities = entityManager.createQuery( + "update Person p set p.name = :newName where p.name = :oldName") + .setParameter("oldName", oldName) + .setParameter("newName", newName) + .executeUpdate(); +---- +[source, java] +---- +// Hibernate native API +int updatedEntities = session.createMutationQuery( + "update Person set name = :newName where name = :oldName") + .setParameter("oldName", oldName) + .setParameter("newName", newName) + .executeUpdate(); +---- + +The integer value returned by `executeUpdate()` indicates the number of entity instances affected by the operation. + +[NOTE] +==== +In a `JOINED` inheritance hierarchy, multiple rows are required to store a single entity instance. +In this case, the update count returned by Hibernate might not be exactly the same as the number of rows affected in the database. +==== + +An `update` statement, by default, does not affect the column mapped by the `@Version` attribute of the affected entities. + +Adding the keyword `versioned`—writing `update versioned`—specifies that Hibernate should increment the version number or update the last modification timestamp. + +// [NOTE] +// ==== +// `update versioned` does not work with custom version types defined by implementing `UserVersionType`, and is not available in JPQL. 
+// ====
+
+[[update-versioned-example]]
+[source, hql]
+----
+update versioned Book set title = :newTitle where ssn = :ssn
+----
+
+Unfortunately, an `update` statement may not directly join other entities, not even using an <>, but it may have subqueries in its `set` clause, or in the `where` clause, which may contain joins.
+
+[[delete]]
+==== Delete statements
+
+The BNF for a `delete` statement is even simpler:
+
+[[delete-bnf-example]]
+[source, antlrv4]
+----
+include::{extrasdir}/statement_delete_bnf.txt[]
+----
+
+For example:
+
+[source,hql]
+----
+delete Author author where is empty author.books
+----
+
+As in SQL, the presence or absence of the `from` keyword has absolutely no effect on the semantics of the `delete` statement.
+
+Just like update statements, delete statements are polymorphic, and affect mapped subclasses of the given entity class.
+Therefore, a single HQL `delete` statement might result in multiple SQL delete statements executed against the database.
+
+A `delete` statement is executed by calling `Query.executeUpdate()`.
+
+The integer value returned by `executeUpdate()` indicates the number of entity instances affected by the operation.
+
+A `delete` statement may not directly join other entities, but it may have subqueries in the `where` clause, which may contain joins.
+
+[[insert]]
+==== Insert statements
+
+There are two kinds of `insert` statement:
+
+- `insert ... values`, where the attribute values to insert are given directly as tuples, and
+- `insert ... select`, where the inserted attribute values are sourced from a subquery.
+
+The first form inserts a single row in the database, or multiple rows if you provide multiple tuples in the `values` clause.
+The second form may insert many new rows, or none at all.
+
+[%unbreakable]
+[TIP]
+====
+The first sort of `insert` statement is not as useful.
+It's usually better to just use `persist()`.
+
+But you might consider using it to set up test data.
+====
+
+[NOTE]
+====
+`insert` statements are not part of JPQL.
+====
+
+The BNF for an `insert` statement is:
+
+[[insert-bnf-example]]
+[source, antlrv4]
+----
+include::{extrasdir}/statement_insert_bnf.txt[]
+----
+
+For example:
+
+[[insert-example]]
+[source, hql]
+----
+insert Person (id, name)
+    values (100L, 'Jane Doe'), (200L, 'John Roe')
+----
+
+[source, hql]
+----
+insert into Author (id, name, bio)
+    select id, name, name || ' is a newcomer for ' || str(year(local date))
+    from Person
+    where id = :pid
+----
+
+As in SQL, the presence or absence of the `into` keyword has no effect on the semantics of the `insert` statement.
+
+From these examples we might notice that `insert` statements are in one respect a bit different to `update` and `delete` statements.
+
+[IMPORTANT]
+====
+An `insert` statement is inherently _not_ polymorphic!
+Its list of target fields is of fixed length, whereas each subclass of an entity class might declare additional fields.
+If the entity is involved in a mapped inheritance hierarchy, only attributes declared directly by the named entity and its superclasses may occur in the list of target fields.
+Attributes declared by subclasses may not occur.
+====
+
+The `queryExpression` in an `insert ... select` statement may be any valid `select` query, with the caveat that the types of the values in the `select` list must match the types of the target fields.
+
+[NOTE]
+====
+This is checked during query compilation rather than allowing the type check to delegate to the database.
+This may cause problems when two Java types map to the same database type.
+For example, an attribute of type `LocalDateTime` and an attribute of type `Timestamp` both map to the SQL type `timestamp`, but are not considered assignable by the query compiler.
+==== + +There are two ways to assign a value to the `@Id` attribute: + +- explicitly specify the id attribute in the list of target fields, and its value in the values assigned to the target fields, or +- omit it, in which case a generated value is used. + +Of course, the second option is only available for entities with database-level id generation (sequences or identity/autoincrement columns). +It's not available for entities whose id generator is implemented in Java, nor for entities whose id is assigned by the application. + +The same two options are available for a `@Version` attribute. +When no version is explicitly specified, the version for a new entity instance is used. + +Like `update` and `delete` statements, an `insert` statement must be executed by calling `Query.executeUpdate()`. + +Now it's time to look at something _much_ more complicated. + +[[select]] +==== Select statements + +Select statements retrieve and analyse data. +This is what we're really here for. + +The full BNF for a `select` query is quite complicated, but there's no need to understand it now. +We're displaying it here for future reference. + +[[select-bnf-example]] +[source, antlrv4] +---- +include::{extrasdir}/statement_select_bnf.txt[] +---- + +Most of the complexity here arises from the interplay of set operators (`union`, `intersect`, and `except`) with sorting. 
+
+We'll describe the various clauses of a query later, in <> and in <>, but for now, to summarize, a query might have these bits:
+
+[cols="22,22,~"]
+|===
+| Clause | Jargon | Purpose
+
+| `with` | Common table expressions | Declares <> to be used in the following query
+| `from` and `join` | Roots and joins | <> the entities involved in the query, and how they're <> to each other
+| `where` | Selection/restriction | Specifies a <> on the data returned by the query
+| `group by`| Aggregation/grouping | Controls <>
+| `having` | Selection/restriction | Specifies a <> to apply _after_ aggregation
+| `select` | Projection | Specifies a <> (the things to return from the query)
+| `union`, `intersect`, `except` | Set algebra | These are <> applied to the results of multiple subqueries
+| `order by` | Ordering | Specifies how the results should be <>
+| `limit`, `offset`, `fetch` | Limits | Allows for <> the results
+|===
+
+Every one of these clauses is optional!
+
+For example, the simplest query in HQL has no `select` clause at all:
+
+[[select-simplest-example]]
+[source, hql]
+----
+from Book
+----
+
+But we don't necessarily _recommend_ leaving off the `select` list.
+
+[NOTE]
+====
+HQL doesn't require a `select` clause, but JPQL _does_.
+====
+
+Naturally, the previous query may be written with a `select` clause:
+
+[source, hql]
+----
+select book from Book book
+----
+
+But when there's no explicit `select` clause, the select list is implied by the result type of the query:
+
+[source, java]
+[%unbreakable]
+----
+// result type Book, only the Book selected
+List<Book> books =
+        session.createQuery("from Book join authors", Book.class)
+                .getResultList();
+for (Book book: books) {
+    ...
+}
+----
+
+[source, java]
+[%unbreakable]
+----
+// result type Object[], both Book and Author selected
+List<Object[]> booksWithAuthors =
+        session.createQuery("from Book join authors", Object[].class)
+                .getResultList();
+for (var bookWithAuthor: booksWithAuthors) {
+    Book book = (Book) bookWithAuthor[0];
+    Author author = (Author) bookWithAuthor[1];
+    ...
+}
+----
+
+For complicated queries, it's probably best to explicitly specify a `select` list.
+
+An alternative "simplest" query has _only_ a `select` list:
+
+[[select-simplest-example-alt]]
+[source, hql]
+----
+select local datetime
+----
+
+This results in a SQL `from dual` query (or equivalent).
+
+[%unbreakable]
+[TIP]
+====
+Looking carefully at the BNF given above, you might notice that the `select` list may occur either at the beginning of a query, or near the end, right before `order by`.
+
+Of course, standard SQL, and JPQL, require that the `select` list comes at the beginning.
+But it's more natural to put it last:
+
+[source, hql]
+----
+from Book book select book.title, book.isbn
+----
+
+This form of the query is more readable, because the alias is declared _before_ it's used, just as God and nature intended.
+====
+
+Naturally, queries are always polymorphic.
+Indeed, a fairly innocent-looking HQL query can easily translate to a SQL statement with many joins and unions.
+
+[TIP]
+====
+We need to be a _bit_ careful about that, but actually it's usually a good thing.
+HQL makes it very easy to fetch all the data we need in a single trip to the database, and that's absolutely key to achieving high performance in data access code.
+Typically, it's much worse to fetch exactly the data we need, but in many round trips to the database server, than it is to fetch just a bit more data than what we're going to need, all in a single SQL query.
+====
+
+[[returning-to-java]]
+=== Representing result sets in Java
+
+One of the most uncomfortable aspects of working with data in Java is that there's no good way to represent a table.
+Languages designed for working with data—R is an excellent example—always feature some sort of built-in table or "data frame" type.
+Of course, Java's type system gets in the way here.
+This problem is much easier to solve in a dynamically-typed language.
+The fundamental problem for Java is that it doesn't have tuple types.
+
+Queries in Hibernate return tables.
+Sure, often a column holds whole entity objects, but we're not restricted to returning a single entity, and we often write queries that return multiple entities in each result, or which return things which aren't entities.
+
+So we're faced with the problem of representing such result sets, and, we're sad to say, there's no fully general and completely satisfying solution.
+
+Let's begin with the easy case.
+
+[[query-result-types-single]]
+==== Queries with a single projected item
+
+If there's just one projected item in the `select` list, then, no sweat, that's the type of each query result.
+
+[source, java]
+[%unbreakable]
+----
+List<String> results =
+        entityManager.createQuery("select title from Book", String.class)
+                .getResultList();
+----
+
+There's really no need to fuss about with trying to represent a "tuple of length 1".
+We're not even sure what to call those.
+
+Problems arise as soon as we have multiple items in the `select` list of a query.
+
+[[query-result-types-multiple]]
+==== Queries with multiple projected items
+
+When there are multiple expressions in the select list then, by default, and in compliance with JPA, each query result is packaged as an array of type `Object[]`.
+ +[[select-clause-projection-example]] +[source, java] +[%unbreakable] +---- +List results = + entityManager.createQuery("select title, left(book.text, 200) from Book", + Object[].class) + .getResultList(); +for (var result : results) { + String title = (String) result[0]; + String preamble = (String) result[1]; +} +---- + +This is bearable, but let's explore some other options. + +JPA lets us specify that we want each query result packaged as an instance of `jakarta.persistence.Tuple`. +All we have to do is pass the class `Tuple` to `createQuery()`. + +[source, java] +[%unbreakable] +---- +List tuples = + entityManager.createQuery("select title as title, left(book.text, 200) as preamble from Book", + Tuple.class) + .getResultList(); +for (Tuple tuple : tuples) { + String title = tuple.get("title", String.class); + String preamble = tuple.get("preamble", String.class); +} +---- + +The names of the `Tuple` elements are determined by the aliases given to the projected items in the select list. +If no aliases are specified, the elements may be accessed by their position in the list, where the first item is assigned the position zero. 
+ +As an extension to JPA, and in a similar vein, Hibernate lets us pass `Map` or `List`, and have each result packaged as a map or list: + +[source, java] +[%unbreakable] +---- +var results = + entityManager.createQuery("select title as title, left(book.text, 200) as preamble from Book", + Map.class) + .getResultList(); +for (var map : results) { + String title = (String) map.get("title"); + String preamble = (String) map.get("preamble"); +} +---- +[source, java] +[%unbreakable] +---- +var results = + entityManager.createQuery("select title, left(book.text, 200) from Book", + List.class) + .getResultList(); +for (var list : results) { + String title = (String) list.get(0); + String preamble = (String) list.get(1); +} +---- + +Unfortunately, not one of the types `Object[]`, `List`, `Map`, nor `Tuple` lets us access an individual item in a result tuple without a type cast. +Sure `Tuple` does the type cast for us when we pass a class object to `get()`, but it's logically identical. +Fortunately there's one more option, as we're about to see. + +[NOTE] +==== +Actually, `Tuple` really exists to service the criteria query API, and in that context it _does_ enable truly typesafe access to query results. +==== + +Hibernate 6 lets us pass an arbitrary class type with an appropriate constructor to `createQuery()` and will use it to package the query results. +This works extremely nicely with `record` types. + +[[select-clause-implicit-instantiation-example]] +[source, java] +[%unbreakable] +---- +record BookSummary(String title, String summary) {} + +List results = + entityManager.createQuery("select title, left(book.text, 200) from Book", + BookSummary.class) + .getResultList(); +for (var result : results) { + String title = result.title(); + String preamble = result.summary(); +} +---- + +It's important that the constructor of `BookSummary` has parameters which exactly match the items in the `select` list. 
+ +[NOTE] +==== +This class does not need to be mapped or annotated in any way. + +Even if the class _is_ an entity class, the resulting instances are _not_ managed entities and are _not_ associated with the session. +==== + +We must caution that this still isn't typesafe. +In fact, we've just pushed the typecasts down into the call to `createQuery()`. +But at least we don't have to write them explicitly. + +[[select-new]] +==== Instantiation + +In JPA, and in older versions of Hibernate, this functionality required more ceremony. + +[cols="25,~,~,^15"] +|=== +| Result type | Legacy syntax | Streamlined syntax | JPA standard + +| `Map` | `select new map(x, y)` | `select x, y` | ✖/✖ +| `List` | `select new list(x, y)` | `select x, y` | ✖/✖ +| Arbitrary class `Record` | `select new Record(x, y)` | `select x, y` | ✔/✖ +|=== + +For example, the JPA-standard `select new` construct packages the query results into a user-written Java class instead of an array. + +[[select-clause-dynamic-instantiation-example]] +[source, java] +[%unbreakable] +---- +record BookSummary(String title, String summary) {} + +List results = + entityManager.createQuery("select new BookSummary(title, left(book.text, 200)) from Book", + BookSummary.class) + .getResultList(); +for (var result : results) { + String title = result.title(); + String preamble = result.summary(); +} +---- + +Simplifying slightly, the BNF for a projected item is: + +[[select-item-bnf]] +[source, antlrv4] +---- +selection + : (expression | instantiation) alias? + +instantiation + : "NEW" instantiationTarget "(" selection ("," selection)* ")" + +alias + : "AS"? identifier +---- + +Where the list of ``selection``s in an `instantiation` is essentially a nested projection list. 
+ + diff --git a/documentation/src/main/asciidoc/querylanguage/Credits.adoc b/documentation/src/main/asciidoc/querylanguage/Credits.adoc new file mode 100644 index 000000000000..fafebec4c01c --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/Credits.adoc @@ -0,0 +1,9 @@ +[[credits]] +== Credits + +The full list of contributors to Hibernate ORM can be found on the +https://github.com/hibernate/hibernate-orm/graphs/contributors[GitHub repository]. + +The following contributors were involved in this documentation: + +* Gavin King diff --git a/documentation/src/main/asciidoc/querylanguage/Expressions.adoc b/documentation/src/main/asciidoc/querylanguage/Expressions.adoc new file mode 100644 index 000000000000..28f12f922c55 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/Expressions.adoc @@ -0,0 +1,1349 @@ +[[expressions]] +== Expressions + +We now switch gears, and begin describing the language from the bottom up. +The very bottom of a programming language is its syntax for literal values. + +// Essentially, expressions are references that resolve to basic or tuple values. + +[[literals]] +=== Literals + +The most important literal value in this language is `null`. It's assignable to any other type. + +[[boolean-literals]] +==== Boolean literals + +The boolean literal values are the (case-insensitive) keywords `true` and `false`. + +[[string-literals]] +==== String literals + +String literals are enclosed in single quotes. + +[source,hql] +---- +select 'hello world' +---- + +To escape a single quote within a string literal, use a doubled single quote: `''`. + +[[string-literals-example]] +//.String literals examples +[source, hql] +---- +from Book where title like 'Ender''s' +---- + +Alternatively, Java-style double-quoted strings are also allowed, with the usual Java character escape syntax. + +[source,hql] +---- +select "hello\tworld" +---- + +This option is not much used. 
+ +[[numeric-literals]] +==== Numeric literals + +Numeric literals come in several different forms: + +|=== +| Kind | Type | Example + +| Integer literals | `Long`, `Integer`, `BigInteger` | `1`, `3_000_000L`, `2BI` +| Decimal literals | `Double`, `Float`, `BigDecimal` | `1.0`, `123.456F`, `3.14159265BD` +| Hexadecimal literals | `Long`, `Integer` | `0X1A2B`, `0x1a2b` +| Scientific notation | `Double`, `Float`, `BigDecimal` | `1e-6`, `6.674E-11F` +|=== + +For example: + +[[numeric-literals-example]] +[source, hql] +---- +from Book where price < 100.0 +---- +[source, hql] +---- +select author, count(book) +from Author as author + join author.books as book +group by author +having count(book) > 10 +---- + +The type of a numeric literal may be specified using a Java-style postfix: +|=== +| Postfix | Type | Java type + +| `L` or `l` | long integer | `long` +| `D` or `d` | double precision | `double` +| `F` or `f` | single precision | `float` +| `BI` or `bi` | large integer | `BigInteger` +| `BD` or `bd` | exact decimal | `BigDecimal` +|=== + +It's not usually necessary to specify the precision explicitly. + +[NOTE] +==== +In a literal with an exponent, the `E` is case-insensitive. +Similarly, the Java-style postfix is case-insensitive. +==== + +[[datetime-literals]] +==== Date and time literals + +According to the JPQL specification, date and time literals may be specified using the JDBC escape syntax. +Since this syntax is rather unpleasant to look at, HQL provides not one, but two alternatives. 
+ +|=== +| Date/time type | Recommended Java type | JDBC escape syntax 💀| Braced literal syntax | Explicitly typed literal syntax + +| Date | `LocalDate` | `{d 'yyyy-mm-dd'}` | `{yyyy-mm-dd}` | `date yyyy-mm-dd` +| Time | `LocalTime` | `{t 'hh:mm'}` | `{hh:mm}` | `time hh:mm` +| Time with seconds | `LocalTime` | `{t 'hh:mm:ss'}` | `{hh:mm:ss}` | `time hh:mm:ss` +| Datetime | `LocalDateTime` | `{ts 'yyyy-mm-ddThh:mm:ss'}` | `{yyyy-mm-dd hh:mm:ss}` | `datetime yyyy-mm-dd hh:mm:ss` +| Datetime with milliseconds | `LocalDateTime` | `{ts 'yyyy-mm-ddThh:mm:ss.millis'}` | `{yyyy-mm-dd hh:mm:ss.millis}` | `datetime yyyy-mm-dd hh:mm:ss.millis` +| Datetime with an offset | `OffsetDateTime` | `{ts 'yyyy-mm-ddThh:mm:ss+hh:mm'}` | `{yyyy-mm-dd hh:mm:ss +hh:mm}` | `datetime yyyy-mm-dd hh:mm:ss +hh:mm` +| Datetime with a time zone | `OffsetDateTime` | `{ts 'yyyy-mm-ddThh:mm:ss GMT'}` | `{yyyy-mm-dd hh:mm:ss GMT}` | `datetime yyyy-mm-dd hh:mm:ss GMT` +|=== + +Literals referring to the current date and time are also provided. +Again there is some flexibility. + +|=== +| Date/time type | Java type | Underscored syntax | Spaced syntax + +| Date | `java.time.LocalDate` | `local_date` | `local date` +| Time | `java.time.LocalTime` | `local_time` | `local time` +| Datetime | `java.time.LocalDateTime` | `local_datetime` | `local datetime` +| Offset datetime | `java.time.OffsetDateTime`| `offset_datetime` | `offset datetime` +| Instant | `java.time.Instant` | `instant` | `instant` +| Date | `java.sql.Date` 💀| `current_date` | `current date` +| Time | `java.sql.Time` 💀| `current_time` | `current time` +| Datetime | `java.sql.Timestamp` 💀| `current_timestamp` | `current timestamp` +|=== + +Of these, only `local date`, `local time`, `local datetime`, `current_date`, `current_time`, and `current_timestamp` are defined by the JPQL specification. + +[IMPORTANT] +==== +The use of date and time types from the `java.sql` package is strongly discouraged! +Always use `java.time` types in new code. 
+==== + +[[duration-literals]] +==== Duration literals + +There are two sorts of duration in HQL: + +* _year-day durations_, that is, the length of an interval between two dates, and +* _week-nanosecond durations_, that is, the length of an interval between two datetimes. + +For conceptual reasons, the two kinds of duration cannot be cleanly composed. + +Literal duration expressions are of form `n unit`, for example `1 day` or `10 year` or `100 nanosecond`. + +The unit may be: `day`, `month`, `quarter`, `year`, `second`, `minute`, `hour`, or `nanosecond`. + +[NOTE] +==== +A HQL duration is considered to map to a Java `java.time.Duration`, but semantically they're perhaps more similar to an ANSI SQL `INTERVAL` type. +==== + +[[binary-literals]] +==== Binary string literals + +HQL also provides a choice of formats for binary strings: + +* the braced syntax `{0xDE, 0xAD, 0xBE, 0xEF}`, a list of Java-style hexadecimal byte literals, or +* the quoted syntax `X'DEADBEEF'` or `x'deadbeef'`, similar to SQL. + +[[enum-literals]] +==== Enum literals + +Literal values of a Java enumerated type may be written without needing to specify the enum class name: + +[[enum-example]] +[source, hql] +---- +from Book where status <> OUT_OF_PRINT +---- + +Here, the enum class is inferred from the type of the expression on the left of the comparison operator. + +[[java-constants]] +==== Java constants + +HQL allows any Java `static` constant to be used in HQL, but it must be referenced by its fully-qualified name: + +[[java-constant-example]] +[source, hql] +---- +select java.lang.Math.PI +---- + +[[entity-name-literals]] +==== Literal entity names + +Entity names may also occur as a literal value. They do not need to be qualified. + +[source,hql] +---- +from Payment as payment +where type(payment) = CreditCardPayment +---- + +See <>. 
+
+[[path-expressions]]
+=== Identification variables and path expressions
+
+A path expression is either:
+
+- a reference to an <>, or
+- a _compound path_, beginning with a reference to an identification variable, and followed by a period-separated list of references to entity attributes.
+
+As an extension to the JPA spec, HQL, just like SQL, allows a compound path expression where the identification variable at the beginning of the path is missing.
+That is, instead of `var.foo.bar`, it's legal to write just `foo.bar`.
+But this is only allowed when the identification variable may be unambiguously inferred from the first element, `foo` of the compound path.
+The query must have exactly one identification variable `var` for which the path `var.foo` refers to an entity attribute.
+Note that we will continue to call these paths "compound", even if they only have one element.
+
+[TIP]
+====
+This streamlines the query rather nicely when there's just one root entity and no joins.
+But when the query has multiple identification variables it makes the query much harder to understand.
+====
+
+If an element of a compound path refers to an association, the path expression produces an <>.
+
+[source,hql]
+----
+select book.publisher.name from Book book
+----
+
+An element of a compound path referring to a many-to-one or one-to-one association may have the <> function applied to it.
+
+[source,hql]
+----
+select treat(order.payment as CreditCardPayment).creditCardNumber from Order order
+----
+
+If an element of a compound path refers to a collection or many-valued association, it must have one of <> applied to it.
+
+[source,hql]
+----
+select element(book.authors).name from Book book
+----
+
+No other function may be applied to a non-terminal element of a path expression.
+ +Alternatively, if the element of the compound path refers to a list or map, it may have the indexing operator applied to it: + +[source,hql] +---- +select book.editions[0].date from Book book +---- + +No other operator may be applied to a non-terminal element of a path expression. + +=== Operator expressions + +HQL has operators for working with strings, numeric values, and date/time types. + +The operator precedence is given by this table, from highest to lowest precedence: + +[cols="40,^20,~"] +|=== +| Precedence class | Type | Operators + +| Grouping and tuple instantiation | | `( ... )` and `(x, y, z)` +| Case lists | | `case ... end` +| Member reference | Binary infix | `a.b` +| Function application | Postfix | `f(x,y)` +| Indexing | Postfix | `a[i]` +| Unary numeric | Unary prefix | `+`, `-` +| Duration conversions | Unary postfix | `by day` and friends +| Binary multiplicative | Binary infix | `*`, `/`, `%` +| Binary additive | Binary infix | `+`, `-` +| Concatenation | Binary infix | `\|\|` +| Nullness | Unary postfix | `is null`, `is empty` +| Containment | Binary infix | `in`, `not in` +| Between | Ternary infix | `between`, `not between` +| Pattern matching | Binary infix | `like`, `ilike`, `not like`, `not ilike` +| Comparison operators | Binary infix | `=`, `<>`, `<`, `>`, `\<=`, `>=` +| Nullsafe comparison | Binary infix | `is distinct from`, `is not distinct from` +| Existence | Unary prefix | `exists` +| Membership | Binary infix | `member of`, `not member of` +| Logical negation | Unary prefix | `not` +| Logical conjunction | Binary infix | `and` +| Logical disjunction | Binary infix | `or` +|=== + +[[concatenation]] +==== String concatenation + +HQL defines two ways to concatenate strings: + +* the SQL-style concatenation operator, `||`, and +* the JPQL-standard `concat()` function. + +See <> for details of the `concat()` function. 
+ +[[concatenation-example]] +[source, hql] +---- +select book.title || ' by ' || listagg(author.name, ' & ') +from Book as book + join book.authors as author +group by book +---- + +Many more operations on strings are defined below, in <>. + +[[numeric-arithmetic]] +==== Numeric arithmetic + +The basic SQL arithmetic operators, `+`,`-`,`*`, and `/` are joined by the remainder operator `%`. + +[[numeric-arithmetic-example]] +[source, hql] +---- +select (1.0 + :taxRate) * sum(item.book.price * item.quantity) +from Order as ord + join ord.items as item +where ord.id = :oid +---- + +When both operands of a binary numeric operator have the same type, the result type of the whole expression is the same as the operands. + +[WARNING] +==== +Thus, `3/2` performs integer division and evaluates to `1`. +==== + +When the operands are of different type, one of the operands is implicitly converted to _wider_ type, with wideness given, in decreasing order, by the list below: + +- `Double` (widest) +- `Float` +- `BigDecimal` +- `BigInteger` +- `Long` +- `Integer` +- `Short` +- `Byte` + +Many more numeric operations are defined below, in <>. + +[[Datetime-arithmetic]] +==== Datetime arithmetic + +Arithmetic involving dates, datetimes, and durations is quite subtle. +Among the issues to consider are: + +- There's two kinds of duration: year-day, and week-nanosecond durations. +The first is a difference between dates; the second is a difference between datetimes. +- We can subtract dates and datetimes, but we can't add them. +- A Java-style duration has much too much precision, and so in order to use it for anything useful, we must somehow truncate it to something coarser-grained. + +Here we list the basic operations. 
+ +[cols="10,38,~,18"] +|=== +| Operator | Expression type | Example | Resulting type + +| `-` | Difference between two dates | `your.birthday - local date` | year-day duration +| `-` | Difference between two datetimes | `local datetime - record.lastUpdated` | week-nanosecond duration +| `+` | Sum of a date and a year-day duration | `local date + 1 week` | date +| `+` | Sum of a datetime and a week-nanosecond duration | `record.lastUpdated + 1 second` | datetime +| `*` | Product of an integer and a duration | `billing.cycles * 30 day` | duration +| `by unit` | Convert a duration to an integer | `(1 year) by day` | integer +|=== + +The `by unit` operator converts a duration to an integer, for example: `(local date - your.birthday) by day` evaluates to the number of days you still have to wait. + +The function `extract(unit from ...)` extracts a field from a date, time, or datetime type, for example, `extract(year from your.birthday)` produces the year in which you were born, and throws away important information about your birthday. + +[IMPORTANT] +==== +Please carefully note the difference between these two operations: `by` and `extract()` both evaluate to an integer, but they have very different uses. +==== + +Additional datetime operations, including the useful `format()` function, are defined below, in <>. + +[[case-expressions]] +=== Case expressions + +Just like in standard SQL, there are two forms of case expression: + +* the _simple_ case expression, and +* the so-called _searched_ case expression. + +[TIP] +==== +Case expressions are verbose. +It's often simpler to use the `coalesce()`, `nullif()`, or `ifnull()` functions, +as described below in <>. 
+==== + +[[simple-case-expressions]] +[discrete] +===== Simple case expressions + +The syntax of the simple form is defined by: + +[[simple-case-expressions-bnf]] +[source, antlrv4] +---- +include::{extrasdir}/simple_case_bnf.txt[] +---- + +For example: + +[[simple-case-expressions-example]] +[source, hql] +---- +select + case author.nomDePlume + when '' then person.name + else author.nomDePlume end +from Author as author + join author.person as person +---- + +[[searched-case-expressions]] +[discrete] +===== Searched case expressions + +The searched form has the following syntax: + +[[searched-case-expressions-bnf]] +[source, antlrv4] +---- +include::{extrasdir}/searched_case_bnf.txt[] +---- + +For example: + +[[searched-case-expressions-example]] +[source, hql] +---- +select + case + when author.nomDePlume is null then person.name + else author.nomDePlume end +from Author as author + join author.person as person +---- + +A `case` expression may contain complex expression, including operator expressions. + +[[exp-functions]] +=== Functions + +Both HQL and JPQL define some standard functions and make them portable between databases. + +[%unbreakable] +[TIP] +==== +A program that wishes to remain portable between Jakarta Persistence providers should in principle limit itself to the use of the functions which are blessed by the specification. +Unfortunately, there's not so many of them. +==== + +In some cases, the syntax of these functions looks a bit funny at first, for example, `cast(number as String)`, or `extract(year from date)`, or even `trim(leading '.' from string)`. +This syntax is inspired by standard ANSI SQL, and we promise you'll get used to it. + +[%unbreakable] +[IMPORTANT] +==== +HQL abstracts away from the actual database-native SQL functions, letting you write queries which are portable between databases. + +For some functions, and always depending on the database, a HQL function invocation translates to a quite complicated SQL expression! 
+==== + +In addition, there are several ways to use a database function that's not known to Hibernate. + +[[functions-typecasts]] +==== Types and typecasts + +The following special functions make it possible to discover or narrow expression types: + +[cols="15,~,~,^15"] +|=== +| Special function | Purpose | Signature | JPA standard + +| `type()` | The (concrete) entity name | `type(e)` | ✔ +| `treat()` | Narrow an entity type | `treat(e as Entity)` | ✔ +| `cast()` | Narrow a basic type | `cast(x as Type)` | ✖ +| `str()` | Cast to a string | `str(x)` | ✖ +|=== + +Let's see what these functions do. + +[[function-type]] +[discrete] +===== Evaluating an entity type + +The function `type()`, applied to an identification variable, evaluates to the entity name of the referenced entity. +This is mainly useful when dealing with entity inheritance hierarchies. + +[[entity-type-exp-example]] +[source, hql] +---- +select payment +from Payment as payment +where type(payment) = CreditCardPayment +---- + +[[function-treat]] +[discrete] +===== Narrowing an entity type + +The function `treat()` may be used to narrow the type of an identification variable. +This is useful when dealing with entity inheritance hierarchies. + +[[treat-example]] +[source, hql] +---- +select payment +from Payment as payment +where length(treat(payment as CreditCardPayment).cardNumber) + between 16 and 20 +---- + +The type of the expression `treat(p as CreditCardPayment)` is the narrowed type, `CreditCardPayment`, instead of the declared type `Payment` of `p`. +This allows the attribute `cardNumber` declared by the subtype `CreditCardPayment` to be referenced. + +- The first argument is usually an identification variable. +- The second argument is the target type given as an unqualified entity name. + +The `treat()` function may even occur in a <>. + +[[function-cast]] +[discrete] +===== General typecasts + +The function `cast()` has a similar syntax, but is used to narrow basic types. 
+ +- Its first argument is usually an attribute of an entity, or a more complex expression involving entity attributes. +- Its second argument is the target type given as an unqualified Java class name: +`String`, `Long`, `Integer`, `Double`, `Float`, `Character`, `Byte`, `BigInteger`, `BigDecimal`, `LocalDate`, `LocalTime`, `LocalDateTime`, etc. + +[source, hql] +---- +select cast(id as String) from Order +---- + +[[function-str]] +[discrete] +===== Casting to string + +The function `str(x)` is a synonym for `cast(x as String)`. + +[source, hql] +---- +select str(id) from Order +---- + +[[functions-null]] +==== Functions for working with null values + +The following functions make it easy to deal with null values: + +[cols="15,~,~,^15"] +|=== +| Function | Purpose | Signature | JPA standard + +| `coalesce()` | First non-null argument | `coalesce(x, y, z)` | ✔ +| `ifnull()` | Second argument if first is null | `ifnull(x,y)` | ✖ +| `nullif()` | `null` if arguments are equal | `nullif(x,y)` | ✔ +|=== + +[discrete] +===== Handling null values + +The `coalesce()` function is a sort of abbreviated `case` expression that returns the first non-null operand. + +[[coalesce-example]] +[source, hql] +---- +select coalesce(author.nomDePlume, person.name) +from Author as author + join author.person as person +---- + +[discrete] +===== Handling null values + +HQL allows `ifnull()` as a synonym for `coalesce()` in the case of exactly two arguments. + +[[ifnull-example]] +[source, hql] +---- +select ifnull(author.nomDePlume, person.name) +from Author as author + join author.person as person +---- + +[discrete] +===== Producing null values + +On the other hand, `nullif()` evaluates to null if its operands are equal, or to its first argument otherwise. 
+
+[[nullif-example]]
+[source, hql]
+----
+select ifnull(nullif(author.nomDePlume, person.name), 'Real name')
+from Author as author
+	join author.person as person
+----
+
+[[functions-datetime]]
+==== Functions for working with dates and times
+
+There are some very important functions for working with dates and times.
+
+[cols="15,~,~,^15"]
+|===
+| Special function | Purpose | Signature | JPA standard
+
+| `extract()` | Extract a datetime field | `extract(field from x)` | ✔
+| `format()` | Format a datetime as a string | `format(datetime as pattern)` | ✖
+| `trunc()` or `truncate()` | Datetime truncation | `truncate(datetime, field)` | ✖
+|===
+
+[[function-extract]]
+[discrete]
+===== Extracting date and time fields
+
+The special function `extract()` obtains a single field of a date, time, or datetime.
+
+- Its first argument is an expression that evaluates to a date, time, or datetime.
+- Its second argument is a date/time _field type_.
+
+Field types include: `day`, `month`, `year`, `second`, `minute`, `hour`, `day of week`, `day of month`, `week of year`, `date`, `time`, `epoch` and more.
+For a full list of field types, see the Javadoc for https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/query/TemporalUnit.html[`TemporalUnit`].
+
+[source, hql]
+----
+from Order where extract(date from created) = local date
+----
+[source, hql]
+----
+select extract(year from created), extract(month from created) from Order
+----
+
+The following functions are abbreviations for `extract()`:
+
+[cols="15,~,^15"]
+|===
+| Function | Long form using `extract()` | JPA standard
+
+| `year(x)` | `extract(year from x)` | ✖
+| `month(x)` | `extract(month from x)` | ✖
+| `day(x)` | `extract(day from x)` | ✖
+| `hour(x)` | `extract(hour from x)` | ✖
+| `minute(x)` | `extract(minute from x)` | ✖
+| `second(x)` | `extract(second from x)` | ✖
+|===
+
+TIP: These abbreviations aren't part of the JPQL standard, but on the other hand they're a lot less verbose.
+ +[source, hql] +---- +select year(created), month(created) from Order +---- + +[[function-format]] +[discrete] +===== Formatting dates and times + +The `format()` function formats a date, time, or datetime according to a pattern. + +- Its first argument is an expression that evaluates to a date, time, or datetime. +- Its second argument is a formatting pattern, given as a string. + +The pattern must be written in a subset of the pattern language defined by Java's `java.time.format.DateTimeFormatter`. + +For a full list of `format()` pattern elements, see the Javadoc for https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html#appendDatetimeFormat[`Dialect.appendDatetimeFormat`]. + +[[function-trunc-datetime]] +[discrete] +===== Truncating a date or time type + +The `truncate()` function truncates the precision of a date, time, or datetime to the temporal unit specified by field type. + +- Its first argument is an expression that evaluates to a date, time, or datetime. +- Its second argument is a date/time field type, specifying the precision of the truncated value. + +Supported temporal units are: `year`, `month`, `day`, `hour`, `minute` or `second`. + +Truncating a date, time or datetime value means obtaining a value of the same type in which all temporal units smaller than `field` have been pruned. +For hours, minutes and second this means setting them to `00`. For months and days, this means setting them to `01`. + +[[string-functions]] +==== Functions for working with strings + +Naturally, there are a good number of functions for working with strings. 
+
+[cols="15,30,~,^15"]
+|===
+| Function | Purpose | Syntax | JPA standard / ANSI SQL Standard
+
+| `upper()` | The string, with lowercase characters converted to uppercase | `upper(str)` | ✔ / ✔
+| `lower()` | The string, with uppercase characters converted to lowercase | `lower(str)` | ✔ / ✔
+| `length()` | The length of the string | `length(str)` | ✔ / ✖
+| `concat()` | Concatenate strings | `concat(x, y, z)` | ✔ / ✖
+| `locate()` | Location of string within a string | `locate(patt, str)`, +
+`locate(patt, str, start)` | ✔ / ✖
+| `position()` | Similar to `locate()` | `position(patt in str)` | ✖ / ✔
+| `substring()` | Substring of a string (JPQL-style) | `substring(str, start)`, +
+`substring(str, start, len)` | ✔ / ✖
+| `substring()` | Substring of a string (ANSI SQL-style)
+| `substring(str from start)`, +
+`substring(str from start for len)` | ✖ / ✔
+| `trim()` | Trim characters from string | See below | ✔ / ✔
+| `overlay()` | For replacing a substring
+| `overlay(str placing rep from start)`, +
+`overlay(str placing rep from start for len)` | ✖ / ✔
+| `pad()` | Pads a string with whitespace, or with a specified character
+| `pad(str with len)`, +
+`pad(str with len leading)`, +
+`pad(str with len trailing)`, or +
+`pad(str with len leading char)` | ✖ / ✖
+| `left()` | The leftmost characters of a string | `left(str, len)` | ✖ / ✖
+| `right()` | The rightmost characters of a string | `right(str, len)` | ✖ / ✖
+| `replace()` | Replace every occurrence of a pattern in a string | `replace(str, patt, rep)` | ✖ / ✖
+| `repeat()` | Concatenate a string with itself multiple times | `repeat(str, times)` | ✖ / ✖
+| `collate()` | Select a collation | `collate(p.name as collation)` | ✖ / ✖
+|===
+
+Let's take a closer look at just some of these.
+
+[IMPORTANT]
+====
+Contrary to Java, positions of characters within strings are indexed from 1 instead of 0!
+
+====
+
+[discrete]
+===== Concatenating strings
+
+The JPQL-standard and ANSI SQL-standard `concat()` function accepts a variable number of arguments, and produces a string by concatenating them.
+
+[%unbreakable]
+[source, hql]
+----
+select concat(book.title, ' by ', listagg(author.name, ' & '))
+from Book as book
+	join book.authors as author
+group by book
+----
+
+[discrete]
+===== Finding substrings
+
+The JPQL function `locate()` determines the position of a substring within another string.
+
+- The first argument is the pattern to search for within the second string.
+- The second argument is the string to search in.
+- The optional third argument is used to specify a position at which to start the search.
+
+[source, hql]
+----
+select locate('Hibernate', title) from Book
+----
+
+The `position()` function has a similar purpose, but follows the ANSI SQL syntax.
+
+[source, hql]
+----
+select position('Hibernate' in title) from Book
+----
+
+[discrete]
+===== Slicing strings
+
+Unsurprisingly, `substring()` returns a substring of the given string.
+
+- The second argument specifies the position of the first character of the substring.
+- The optional third argument specifies the maximum length of the substring.
+
+[source, hql]
+----
+select substring(title, 0, position(' for Dummies' in title)) from Book
+----
+
+[discrete]
+===== Trimming strings
+The `trim()` function follows the syntax and semantics of ANSI SQL.
+It may be used to trim `leading` characters, `trailing` characters, or both.
+
+[source, hql]
+----
+select trim(title) from Book
+----
+[source, hql]
+----
+select trim(trailing ' ' from text) from Book
+----
+
+Its BNF is funky:
+
+[source, antlrv4]
+----
+trimFunction
+	: "TRIM" "(" trimSpecification? trimCharacter? "FROM"? expression ")" ;
+trimSpecification
+	: "LEADING" | "TRAILING" | "BOTH" ;
+----
+
+[discrete]
+===== Collations
+
+Selects a collation to be used for its string-valued argument.
+Collations are useful for <> with `<` or `>`, and in the <>. + +For example, `collate(p.name as ucs_basic)` specifies the SQL standard collation `ucs_basic`. + +IMPORTANT: Collations aren't very portable between databases. + +[[functions-numeric]] +==== Numeric functions + +Of course, we also have a number of functions for working with numeric values. + +[cols="15,~,~,^15"] +|=== +| Function | Purpose | Signature | JPA standard + +| `abs()` | The magnitude of a number | `abs(x)` | ✔ +| `sign()` | The sign of a number | `sign(x)` | ✔ +| `mod()` | Remainder of integer division | `mod(n,d)` | ✔ +| `sqrt()` | Square root of a number | `sqrt(x)` | ✔ +| `exp()` | Exponential function | `exp(x)` | ✔ +| `power()` | Exponentiation | `power(x,y)` | ✔ +| `ln()` | Natural logarithm | `ln(x)` | ✔ +| `round()` | Numeric rounding | `round(number)`, + +`round(number, places)` | ✔ +| `trunc()` or `truncate()` | Numeric truncation | `truncate(number)`, + +`truncate(number, places)` | ✖ +| `floor()` | Floor function | `floor(x)` | ✔ +| `ceiling()` | Ceiling function | `ceiling(x)` | ✔ + +| `log10()` | Base-10 logarithm | `log10(x)` | ✖ +| `log()` | Arbitrary-base logarithm | `log(b,x)` | ✖ +| `pi` | π | `pi` | ✖ +| `sin()`, `cos()`, `tan()`, `asin()`, `acos()`, `atan()` +| Basic trigonometric functions | `sin(theta)`, `cos(theta)` | ✖ +| `atan2()` | Two-argument arctangent (range `(-π,π]`) | `atan2(y, x)` | ✖ +| `sinh()`, `cosh()`, `tanh()` | Hyperbolic functions | `sinh(x)`, `cosh(x)`, `tanh(x)` | ✖ +| `degrees()` | Convert radians to degrees | `degrees(x)` | ✖ +| `radians()` | Convert degrees to radians | `radians(x)` | ✖ +| `least()` | Return the smallest of the given arguments | `least(x, y, z)` |✖ +| `greatest()` | Return the largest of the given arguments | `greatest(x, y, z)` | ✖ +|=== + +We haven't included <>, <>, or <> in this list, because their purpose is more specialized, and because they come with extra special syntax. 
+
+[[functions-collections]]
+==== Functions for dealing with collections
+
+The functions described in this section are especially useful when dealing with `@ElementCollection` mappings, or with collection mappings involving an `@OrderColumn` or `@MapKeyColumn`.
+
+The following functions accept either:
+
+1. an identification variable that refers to a <>, or
+2. a <> that refers to a collection or many-valued association of an entity.
+
+In case 2, application of the function produces an <>.
+
+[[collection-functions]]
+[cols="15,20,~,^15"]
+|===
+| Function | Applies to | Purpose | JPA standard
+
+| `size()` | Any collection | The size of a collection | ✔
+| `element()` | Any collection | The element of a set or list | ✖
+| `index()` | Lists | The index of a list element | ✔
+| `key()` | Maps | The key of a map entry | ✔
+| `value()` | Maps | The value of a map entry | ✔
+| `entry()` 💀 | Maps | The whole entry in a map | ✔
+|===
+
+The next group of functions always accept a compound path referring to a collection or many-valued association of an entity.
+They're interpreted as referring to the collection as a whole.
+
+Application of one of these functions produces an implicit subquery.
+
+[[collective-collection-functions]]
+[cols="15,20,~,^15"]
+|===
+| Function | Applies to | Purpose | JPA standard
+
+| `elements()` | Any collection | The elements of a set or list, collectively | ✖
+| `indices()` | Lists | The indexes of a list, collectively | ✖
+| `keys()` | Maps | The keys of a map, collectively | ✖
+| `values()` | Maps | The values of a map, collectively | ✖
+|===
+
+This query has an implicit join:
+
+[[elements-join-example]]
+[source, hql]
+----
+select title, element(tags) from Book
+----
+
+This query has an implicit subquery:
+
+[[elements-subquery-example]]
+[source, hql]
+----
+select title from Book where 'hibernate' in elements(tags)
+----
+
+[discrete]
+===== Collection sizes
+
+The `size()` function returns the number of elements of a collection or to-many association.
+
+[[size-example]]
+[source, hql]
+----
+select name, size(books) from Author
+----
+
+[[set-functions]]
+[discrete]
+===== Set or list elements
+
+The `element()` function returns a reference to an element of a joined set or list.
+For an identification variable (case 1 above), this function is optional.
+For a compound path (case 2), it's required.
+
+[[list-functions]]
+[discrete]
+===== List indexes
+
+The `index()` function returns a reference to the index of a joined list.
+
+In this example, `element()` is optional, but `index()` is required:
+
+[[index-example]]
+[source, hql]
+----
+select id(book), index(ed), element(ed)
+from Book as book
+    join book.editions as ed
+----
+
+[[map-functions]]
+[discrete]
+===== Map keys and values
+
+The `key()` function returns a reference to a key of a joined map.
+The `value()` function returns a reference to its value.
+ +[[key-value-example]] +[source, hql] +---- +select key(entry), value(entry) +from Thing as thing + join thing.entries as entry +---- + +[[elements-indices]] +===== Quantification over collections + +The functions `elements()`, `indices()`, `keys()`, and `values()` are used to quantify over collections. +We may use them with: + +- an <> or <> predicate, +- a <>, or +- an <>. + +[cols="35,~"] +|=== +| Shortcut | Equivalent subquery + +| `exists elements(book.editions)` | `exists (select ed from book.editions as ed)` +| `2 in indices(book.editions)` | `2 in (select index(ed) from book.editions as ed)` +| `10 > all(elements(book.printings))` | `10 > all(select pr from book.printings as pr)` +| `max(elements(book.printings))` | `(select max(pr) from book.printings as pr)` +|=== + +For example: + +[source, hql] +---- +select title from Book where 'hibernate' in elements(tags) +---- + +Don't confuse the `elements()` function with `element()`, the `indices()` function with `index()`, the `keys()` function with `key()`, or the `values()` function with `value()`. +The functions named in singular deal with elements of "flattened" collections. +If not already joined, they add an implicit join to the query. +The functions with plural naming do _not_ flatten a collection by joining it. + +[%unbreakable] +[IMPORTANT] +==== +The following queries are different: +[source, hql] +---- +select title, max(index(revisions)) from Book /* implicit join */ +---- +[source, hql] +---- +select title, max(indices(revisions)) from Book /* implicit subquery */ +---- +The first query produces a single row, with `max()` taken over all books. +The second query produces a row per book, with `max()` taken over the collection elements belonging to the given book. 
+==== + +[[functions-model]] +==== Functions for working with ids and versions + +Finally, the following functions evaluate the id, version, or natural id of an entity, or the foreign key of a to-one association: + +[[model-functions]] +[cols="12,~,^15"] +|=== +| Function | Purpose | JPA standard + +| `id()` | The value of the entity `@Id` attribute. | ✖ +| `version()` | The value of the entity `@Version` attribute. | ✖ +| `naturalid()` | The value of the entity `@NaturalId` attribute. | ✖ +| `fk()` | The value of the foreign key column mapped by a `@ManyToOne` (or logical `@OneToOne`) association. +Useful with associations annotated `@NotFound`. | ✖ +|=== + +[[user-defined-functions]] +==== Native and user-defined functions + +The functions we've described above are the functions abstracted by HQL and made portable across databases. +But, of course, HQL can't abstract every function in your database. + +There are several ways to call native or user-defined SQL functions. + +- A native or user-defined function may be called using JPQL's `function` syntax, for example, ``function('sinh', phi)``. +(This is the easiest way, but not the best way.) +- A user-written `FunctionContributor` may register user-defined functions. +- A custom `Dialect` may register additional native functions by overriding `initializeFunctionRegistry()`. + +[TIP] +==== +Registering a function isn't hard, but is beyond the scope of this guide. + +(It's even possible to use the APIs Hibernate provides to make your own _portable_ functions!) +==== + +Fortunately, every built-in `Dialect` already registers many native functions for the database it supports. + +[TIP] +==== +Try setting the log category `org.hibernate.HQL_FUNCTIONS` to debug. +Then at startup Hibernate will log a list of type signatures of all registered functions. +==== + + +[[function-sql]] +==== Embedding native SQL in HQL + +The special function `sql()` allows the use of native SQL fragments inside an HQL query. 
+
+The signature of this function is `sql(pattern[, argN]*)`, where `pattern` must be a string literal but the remaining arguments may be of any type.
+The pattern literal is unquoted and embedded in the generated SQL.
+Occurrences of `?` in the pattern are replaced with the remaining arguments of the function.
+
+We may use this, for example, to perform a native PostgreSQL typecast:
+[source, hql]
+----
+from Computer c where c.ipAddress = sql('?::inet', '127.0.0.1')
+----
+This results in SQL logically equivalent to:
+[source, sql]
+----
+select * from Computer c where c.ipAddress = '127.0.0.1'::inet
+----
+Or we can use a native SQL operator:
+[source, hql]
+----
+from Human h order by sql('(? <-> ?)', h.workLocation, h.homeLocation)
+----
+And this time the SQL is logically equivalent to:
+[source, sql]
+----
+select * from Human h order by (h.workLocation <-> h.homeLocation)
+----
+
+[[conditional-expressions]]
+=== Predicates
+
+A predicate is an operator which, when applied to some argument, evaluates to `true` or `false`.
+In the world of SQL-style ternary logic, we must expand this definition to encompass the possibility that the predicate evaluates to `null`.
+Typically, a predicate evaluates to `null` when one of its arguments is `null`.
+
+Predicates occur in the `where` clause, the `having` clause and in searched case expressions.
+
+[[relational-comparisons]]
+==== Comparison operators
+
+The binary comparison operators are borrowed from SQL: `=`, `>`, `>=`, `<`, `\<=`, `<>`.
+
+TIP: If you prefer, HQL treats `!=` as a synonym for `<>`.
+
+The operands should be of the same type.
+
+[[relational-comparisons-example]]
+[source, hql]
+----
+from Book where price < 1.0
+----
+[source, hql]
+----
+from Author as author where author.nomDePlume <> author.person.name
+----
+[source, hql]
+----
+select id, total
+from (
+    select ord.id as id, sum(item.book.price * item.quantity) as total
+    from Order as ord
+        join Item as item
+        group by ord
+)
+where total > 100.0
+----
+
+[[between-predicate]]
+==== The `between` predicate
+
+The ternary `between` operator, and its negation, `not between`, determine if a value falls within a range.
+
+Of course, all three operands must be of compatible type.
+
+[[between-predicate-example]]
+[source, hql]
+----
+from Book where price between 1.0 and 100.0
+----
+
+[[null-predicate]]
+==== Operators for dealing with null
+
+The following operators make it easier to deal with null values.
+
+[cols="20,20,15,~"]
+|===
+| Operator | Negation | Type | Semantics
+
+| `is null` | `is not null` | Unary postfix | `true` if the value to the left is null
+| `is distinct from` | `is not distinct from` | Binary | `true` unless the value on the left is equal to the value on the right, or both values are null
+|===
+
+[[null-predicate-example]]
+[source, hql]
+----
+from Author where nomDePlume is not null
+----
+
+[[collection-operators]]
+==== Collection predicates
+
+The following operators apply to collection-valued attributes and to-many associations.
+
+[cols="15,15,20,~"]
+|===
+| Operator | Negation | Type | Semantics
+
+| `is empty` | `is not empty` | Unary postfix | `true` if the collection or association on the left has no elements
+| `member of` | `not member of` | Binary | `true` if the value on the left is a member of the collection or association on the right
+|===
+
+[[empty-collection-predicate-example]]
+[source, hql]
+----
+from Author where books is empty
+----
+
+[[member-of-collection-predicate-example]]
+[source, hql]
+----
+select author, book
+from Author as author, Book as book
+where author member of book.authors
+----
+
+[[like-predicate]]
+==== String pattern matching
+
+The `like` operator performs pattern matching on strings.
+Its friend `ilike` performs case-insensitive matching.
+
+Their syntax is defined by:
+
+[[like-predicate-bnf]]
+[source, antlrv4]
+----
+include::{extrasdir}/predicate_like_bnf.txt[]
+----
+
+The expression on the right is a pattern, where:
+
+* `_` matches any single character,
+* `%` matches any number of characters, and
+* if an escape character is specified, it may be used to escape either of these wildcards.
+
+[[like-predicate-example]]
+[source, hql]
+----
+from Book where title not like '% for Dummies'
+----
+
+The optional `escape` character allows a pattern to include a literal `_` or `%` character.
+
+As you can guess, `not like` and `not ilike` are the enemies of `like` and `ilike`, and evaluate to the exact opposite boolean values.
+
+[[in-predicate]]
+==== The `in` predicate
+
+The `in` predicate evaluates to true if the value to its left is in ... well, whatever it finds to its right.
+ +Its syntax is unexpectedly complicated: + +[[in-predicate-bnf]] +[source, antlrv4, indent=0] +---- +include::{extrasdir}/predicate_in_bnf.txt[] +---- + +This less-than-lovely fragment of the HQL ANTLR grammar tells us that the thing to the right might be: + +- a list of values enclosed in parentheses, +- a subquery, +- one of the collection-handling functions defined <>, or +- a query parameter, + +The type of the expression on the left, and the types of all the values on the right must be compatible. + +[%unbreakable] +[NOTE] +==== +JPQL limits the legal types to string, numeric, date/time, and enum types, and in JPQL the left expression must be either: + +- a _state field_, which means a basic attribute, excluding associations and embedded attributes, or +- an <>. + +HQL is far more permissive. HQL itself does not restrict the type in any way, though the database itself might. +Even embedded attributes are allowed, although that feature depends on the level of support for tuple or "row value" constructors in the underlying database. +==== + +[[in-predicate-example]] +[source, hql] +---- +from Payment as payment +where type(payment) in (CreditCardPayment, WireTransferPayment) +---- +[source, hql] +---- +from Author as author +where author.person.name in (select name from OldAuthorData) +---- +[source, hql] +---- +from Book as book +where :edition in elements(book.editions) +---- + +The next example doesn't work on every database: + +[%unbreakable] +[source, hql] +---- +from Author as author +where (author.person.name, author.person.birthdate) + in (select name, birthdate from OldAuthorData) +---- + +Here we used a "row value" constructor, a seemingly pretty basic feature which is surprisingly-poorly supported. 
+
+[%unbreakable]
+[TIP]
+====
+Here's a very useful idiom:
+[source,java]
+----
+List<Book> books =
+        session.createSelectionQuery("from Book where isbn in :isbns", Book.class)
+            .setParameterList("isbns", listOfIsbns)
+            .getResultList();
+----
+====
+
+[[relational-comparisons-subqueries]]
+==== Comparison operators and subqueries
+
+The binary comparisons we met <> may involve a quantifier, either:
+
+- a quantified subquery, or
+- a quantifier applied to one of the functions defined <>.
+
+The quantifiers are unary prefix operators: `all`, `every`, `any`, and `some`.
+
+[cols="10,10,~"]
+|===
+| Subquery operator | Synonym | Semantics
+
+| `every` | `all` | Evaluates to true if the comparison is true for _every_ value in the result set of the subquery
+| `any` | `some` | Evaluates to true if the comparison is true for _at least one_ value in the result set of the subquery
+|===
+
+[[all-subquery-comparison-qualifier-example]]
+[source, hql]
+----
+from Publisher pub where 100.0 < all(select price from pub.books)
+----
+
+[[collection-expressions-all-some-example]]
+[source, hql]
+----
+from Publisher pub where :title = some(select title from pub.books)
+----
+
+[[exists-predicate]]
+==== The `exists` predicate
+
+The unary prefix `exists` operator evaluates to true if the thing to its right is nonempty.
+
+The thing to its right might be:
+
+- a subquery, or
+- one of the functions defined <>.
+
+As you can surely guess, `not exists` evaluates to true if the thing to the right _is_ empty.
+
+[[collection-expressions-exists-example]]
+[source, hql]
+----
+from Author where exists elements(books)
+----
+[source, hql]
+----
+from Author as author
+where exists (
+    from Order join items
+    where book in elements(author.books)
+)
+----
+
+[[logical-operators]]
+==== Logical operators
+
+The logical operators are binary infix `and` and `or`, and unary prefix `not`.
+
+Just like SQL, logical expressions are based on ternary logic.
+A logical operator evaluates to null if it has a null operand. diff --git a/documentation/src/main/asciidoc/querylanguage/From.adoc b/documentation/src/main/asciidoc/querylanguage/From.adoc new file mode 100644 index 000000000000..45d9a46f256e --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/From.adoc @@ -0,0 +1,573 @@ +[[root-entities-and-joins]] +== Root entities and joins + +The `from` clause, and its subordinate `join` clauses sit right at the heart of most queries. + +[[from-clause]] +=== Declaring root entities + +The `from` clause is responsible for declaring the entities available in the rest of the query, and assigning them aliases, or, in the language of the JPQL specification, _identification variables_. + +[[identification-variables]] +==== Identification variables + +An identification variable is just a name we can use to refer to an entity and its attributes from expressions in the query. +It may be any legal Java identifier. +According to the JPQL specification, identification variables must be treated as case-insensitive language elements. + +[TIP] +==== +The identification variable is actually optional, but for queries involving more than one entity it's almost always a good idea to declare one. + +This _works_, but it isn't particularly good form: +[source,hql] +---- +from Publisher join books join authors join person where ssn = :ssn +---- +==== + +Identification variables may be declared with the `as` keyword, but this is optional. + +[[root-reference]] +==== Root entity references + +A root entity reference, or what the JPQL specification calls a _range variable declaration_, is a direct reference to a mapped `@Entity` type by its entity name. + +[TIP] +==== +Remember, the _entity name_ is the value of the `name` member of the `@Entity` annotation, or the unqualified Java class name by default. 
+==== + +[[root-reference-jpql-example]] +[source, hql] +---- +select book from Book as book +---- + +In this example, `Book` is the entity name, and `book` is the identification variable. +The `as` keyword is optional. + +Alternatively, a fully-qualified Java class name may be specified. +Then Hibernate will query every entity which inherits the named type. + +[[root-reference-jpql-fqn-example]] +[source, hql] +---- +select doc from org.hibernate.example.AbstractDocument as doc where doc.text like :pattern +---- + +Of course, there may be multiple root entities. + +[[multiple-root-reference-jpql-example]] +[source, hql] +---- +select a, b +from Author a, Author b, Book book +where a in elements(book.authors) + and b in elements(book.authors) +---- + +This query may even be written using the syntax `cross join` in place of the commas: + +[[cross-join-jpql-example]] +[source, hql] +---- +select a, b +from Book book + cross join Author a + cross join Author b +where a in elements(book.authors) + and b in elements(book.authors) +---- + +Of course, it's possible to write old-fashioned pre-ANSI-era joins: + +[source, hql] +---- +select book.title, publisher.name +from Book book, Publisher publisher +where book.publisher = publisher + and book.title like :titlePattern +---- + +But we never write HQL this way. + +[[polymorphism]] +==== Polymorphism + +HQL and JPQL queries are inherently polymorphic. +Consider: + +[[polymorphism-example]] +[source, hql] +---- +select payment from Payment as payment +---- + +This query names the `Payment` entity explicitly. +But the `CreditCardPayment` and `WireTransferPayment` entities inherit `Payment`, and so `payment` ranges over all three types. +Instances of all these entities are returned by the query. + +[NOTE] +==== +The query `from java.lang.Object` is completely legal. (But not very useful!) + +It returns every object of every mapped entity type. 
+==== + +// This behavior may be slightly adjusted using the `@Polymorphism` annotation. +// +// See <> for more. + +[[derived-root]] +==== Derived roots + +A _derived root_ is an uncorrelated subquery which occurs in the `from` clause. + +[[derived-root-example]] +[source, hql] +---- +select id, total +from ( + select ord.id as id, sum(item.book.price * item.quantity) as total + from Order as ord + join Item as item + group by ord +) +where total > 100.0 +---- + +The derived root may declare an identification variable. + +[source, hql] +---- +select stuff.id, stuff.total +from ( + select ord.id as id, sum(item.book.price * item.quantity) as total + from Order as ord + join Item as item + group by ord +) as stuff +where total > 100.0 +---- + +This feature can be used to break a more complicated query into smaller pieces. + +[IMPORTANT] +==== +We emphasize that a derived root must be an _uncorrelated_ subquery. +It may not refer to other roots declared in the same `from` clause. +==== + +A subquery may also occur in a <>, in which case it may be a correlated subquery. + +[[from-cte]] +==== Common table expressions in `from` clause + +A _common table expression (CTE)_ is like a derived root with a name. +We'll discuss CTEs <>. + +[[join]] +=== Declaring joined entities + +Joins allow us to navigate from one entity to another, via its associations, or via explicit join conditions. +There are: + +- _explicit joins_, declared within the `from` clause using the keyword ``join``, and +- _implicit joins_, which don't need to be declared in the `from` clause. + +An explicit join may be either: + +* an _inner join_, written as `join` or `inner join`, +* a _left outer join_, written as `left join` or `left outer join`, +* a _right outer join_, written as `right join` or `right outer join`, or +* a _full outer join_, written as `full join` or `full outer join`. + +[[root-join]] +==== Explicit root joins + +An explicit root join works just like an ANSI-style join in SQL. 
+ +[[explicit-root-join-example]] +[source, hql] +---- +select book.title, publisher.name +from Book book + join Publisher publisher + on book.publisher = publisher +where book.title like :titlePattern +---- + +The join condition is written out explicitly in the `on` clause. + +[NOTE] +==== +This looks nice and familiar, but it's _not_ the most common sort of join in HQL or JPQL. +==== + +[[explicit-join]] +==== Explicit association joins + +Every explicit association join specifies an entity attribute to be joined. +The specified attribute: + +* is usually a `@OneToMany`, `@ManyToMany`, `@OneToOne`, or `@ManyToOne` association, but +* it could be an `@ElementCollection`, and +* it might even be an attribute of embeddable type. + +In the case of an association or collection, the generated SQL will have a join of the same type. +(For a many-to-many association it will have _two_ joins.) +In the case of an embedded attribute, the join is purely logical and does not result in a join in the generated SQL. + +An explicit join may assign an identification variable to the joined entity. + +[[explicit-inner-join-example]] +[source, hql] +---- +from Book as book + join book.publisher as publisher + join book.authors as author +where book.title like :titlePattern +select book.title, author.name, publisher.name +---- + +For an outer join, we must write our query to accommodate the possibility that the joined association is missing. + +[[explicit-outer-join-example]] +[source, hql] +---- +from Book as book + left join book.publisher as publisher + join book.authors as author +where book.title like :titlePattern +select book.title, author.name, ifnull(publisher.name, '-') +---- + +For further information about collection-valued association references, see <>. + +[[explicit-join-conditions]] +==== Explicit association joins with join conditions + +The `with` or `on` clause allows explicit qualification of the join conditions. 
+
+[NOTE]
+====
+The specified join conditions are _added_ to the join conditions specified by the foreign key association.
+That's why, historically, HQL uses the keyword `with` here:
+"with" emphasizes that the new condition doesn't _replace_ the original join conditions.
+
+The `with` keyword is specific to Hibernate. JPQL uses `on`.
+====
+
+Join conditions occurring in the `with` or `on` clause are added to the `on` clause in the generated SQL.
+
+[[explicit-join-with-example]]
+[source, hql]
+----
+from Book as book
+    left join book.publisher as publisher
+        with publisher.closureDate is not null
+    left join book.authors as author
+        with author.type <> COLLABORATION
+where book.title like :titlePattern
+select book.title, author.name, publisher.name
+----
+
+// The following query is arguably less clear, but it's semantically identical:
+//
+// [[explicit-join-jpql-on-example]]
+// [source, hql]
+// ----
+// from Book as book
+//     left join book.publisher as publisher
+//         on publisher.closureDate is not null
+//     left join book.authors as author
+//         on author.type <> COLLABORATION
+// where book.title like :titlePattern
+// select book.title, author.name, publisher.name
+// ----
+
+[[explicit-fetch-join]]
+==== Association fetching
+
+A _fetch join_ overrides the laziness of a given association, specifying that the association should be fetched with a SQL join.
+The join may be an inner or outer join.
+
+* A `join fetch`, or, more explicitly, `inner join fetch`, only returns base entities with an associated entity.
+* A `left join fetch`, or—for lovers of verbosity—``left outer join fetch``, returns all the base entities, including those which have no associated joined entity.
+
+[IMPORTANT]
+====
+This is one of the most important features of Hibernate.
+To achieve acceptable performance with HQL, you'll need to use `join fetch` quite often.
+Without it, you'll quickly run into the dreaded "n+1 selects" problem.
+====
+
+In the following query, the use of `join fetch` specifies that the publisher of each `Book`, and its collection of authors, should be fetched in the same SQL query:
+
+[[explicit-fetch-join-example]]
+[source, hql]
+----
+select book
+from Book as book
+    left join fetch book.publisher
+    join fetch book.authors
+----
+
+In this example, we used a left outer join for `book.publisher` because we also wanted to obtain books with no publisher, but a regular inner join for `book.authors` because every book has at least one author.
+
+A query may have more than one fetch join, but be aware that:
+
+* it's perfectly safe to fetch several to-one associations in series or parallel in a single query, and
+* a single series of _nested_ fetch joins is also fine, but
+* fetching multiple collections or to-many associations in _parallel_ results in a Cartesian product at the database level, and might exhibit very poor performance.
+
+HQL doesn't disallow it, but it's usually a bad idea to apply a restriction to a ``join fetch``ed entity, since the elements of the fetched collection would be incomplete.
+Indeed, it's best to avoid even assigning an identification variable to a fetched joined entity except for the purpose of specifying a nested fetch join.
+
+[IMPORTANT]
+====
+Fetch joins should usually be avoided in limited or paged queries.
+This includes:
+
+- queries executed with limits specified via the `setFirstResult()` and `setMaxResults()` methods of `Query`, or
+- queries with a limit or offset declared in HQL, described below in <>.
+
+Nor should they be used with the `scroll()` and `stream()` methods of the `Query` interface.
+====
+
+Fetch joins are disallowed in subqueries, where they would make no sense.
+
+[[join-treat]]
+==== Joins with typecasts
+
+An explicit join may narrow the type of the joined entity using `treat()`.
+
+[[join-treat-example]]
+[source, hql]
+----
+from Order as ord
+    join treat(ord.payments as CreditCardPayment) as creditCardPayment
+where length(creditCardPayment.cardNumber) between 16 and 20
+select ord.id, creditCardPayment.cardNumber, creditCardPayment.amount
+----
+
+Here, the identification variable `creditCardPayment` declared to the right of `treat()` has the narrowed type `CreditCardPayment`, instead of the declared type `Payment`.
+This allows the attribute `cardNumber` declared by the subtype `CreditCardPayment` to be referenced in the rest of the query.
+
+See <> for more information about `treat()`.
+
+[[join-derived]]
+==== Subqueries in joins
+
+A `join` clause may contain a subquery, either:
+
+- an uncorrelated subquery, which is almost the same as a <>, except that it may have an `on` restriction, or
+- a _lateral join_, which is a correlated subquery, and may refer to other roots declared earlier in the same `from` clause.
+
+The `lateral` keyword just distinguishes the two cases.
+
+[[derived-join-example]]
+[source, hql]
+----
+from Phone as phone
+    left join (
+        select call.duration as duration, call.phone.id as cid
+        from Call as call
+        order by call.duration desc
+        limit 1
+    ) as longest on cid = phone.id
+where phone.number = :phoneNumber
+select longest.duration
+----
+
+This query may also be expressed using a `lateral` join:
+
+[source, hql]
+----
+from Phone as phone
+    left join lateral (
+        select call.duration as duration
+        from phone.calls as call
+        order by call.duration desc
+        limit 1
+    ) as longest
+where phone.number = :phoneNumber
+select longest.duration
+----
+
+A lateral join may be an inner or left outer join, but not a right join, nor a full join.
+
+[TIP]
+====
+Traditional SQL doesn't allow correlated subqueries in the `from` clause.
+A lateral join is essentially just that, but with a different syntax to what you might expect.
+
+On some databases, `join lateral` is written `cross apply`.
+And on Postgres it's plain `lateral`, without `join`.
+
+It's almost as if they're _deliberately trying_ to confuse us.
+====
+
+Lateral joins are particularly useful for computing top-N elements of multiple groups.
+
+[IMPORTANT]
+====
+Most databases support some flavor of `join lateral`, and Hibernate emulates the feature for databases which don't.
+But emulation is neither very efficient, nor does it support all possible query shapes, so it's important to test on your target database.
+====
+
+[[implicit-join]]
+==== Implicit association joins (path expressions)
+
+It's not necessary to explicitly `join` every entity that occurs in a query.
+Instead, entity associations may be _navigated_, just like in Java:
+
+* if an attribute is of embedded type, or is a to-one association, it may be further navigated, but
+* if an attribute is of basic type, it is considered terminal, and may not be further navigated, and
+* if an attribute is collection-valued, or is a to-many association, it may be navigated, but only with the help of `value()`, `element()`, or `key()`.
+
+It's clear that:
+
+* A path expression like `author.name` with only two elements just refers to state held directly by an entity with an alias `author` defined in `from` or `join`.
+* But a longer path expression, for example, `author.person.name`, might refer to state held by an associated entity.
+(Alternatively, it might refer to state held by an embedded class.)
+
+In the second case, Hibernate will automatically add a join to the generated SQL if necessary.
+
+[[implicit-join-example]]
+[source, hql]
+----
+from Book as book
+where book.publisher.name like :pubName
+----
+
+As in this example, implicit joins usually appear outside the `from` clause of the HQL query.
+However, they always affect the `from` clause of the SQL query.
+ +The example above is equivalent to: + +[[implicit-join-alt]] +[source, hql] +[%unbreakable] +---- +select book +from Book as book + join book.publisher as pub +where pub.name like :pubName +---- + +Note that: + +* Implicit joins are always treated as inner joins. +* Multiple occurrences of the same implicit join always refer to the same SQL join. + +This query: + +[[implicit-join-alias-example]] +[source, hql] +---- +select book +from Book as book +where book.publisher.name like :pubName + and book.publisher.closureDate is null +---- + +results in just one SQL join, and is just a different way to write: + +[[implicit-join-alias-alt]] +[source, hql] +---- +select book +from Book as book + join book.publisher as pub +where pub.name like :pubName + and pub.closureDate is null +---- + +[[collection-valued-associations]] +==== Joining collections and many-valued associations + +When a join involves a collection or many-valued association, the declared identification variable refers to the _elements_ of the collection, that is: + +- to the elements of a `Set`, +- to the elements of a `List`, not to their indices in the list, or +- to the values of a `Map`, not to their keys. + +[[collection-valued-associations-example]] +[source, hql] +---- +select publisher.name, author.name +from Publisher as publisher + join publisher.books as book + join book.authors author +where author.name like :namePattern +---- + +In this example, the identification variable `author` is of type `Author`, the element type of the list `Book.authors`. +But if we need to refer to the index of an `Author` in the list, we need some extra syntax. + +You might recall that we mentioned <> and <> a bit earlier. +These functions may be applied to the identification variable declared in a collection join or many-valued association join. 
+
+[cols="12,20,~,~"]
+|===
+| Function | Applies to | Interpretation | Notes
+
+| `value()` or `element()` | Any collection | The collection element or map entry value
+| Often optional.
+| `index()` | Any `List` with an index column | The index of the element in the list
+| For backward compatibility, it's also an alternative to ``key()``, when applied to a map.
+| `key()` | Any `Map` | The key of the entry in the map | If the key is of entity type, it may be further navigated.
+| `entry()` | Any `Map` | The map entry, that is, the `Map.Entry` of key and value.
+| Only legal as a terminal path, and only allowed in the `select` clause.
+|===
+
+In particular, `index()` and `key()` obtain a reference to a list index or map key.
+
+[[collection-qualification-example]]
+[source, hql]
+[%unbreakable]
+----
+select book.title, author.name, index(author)
+from Book as book
+    join book.authors as author
+----
+[source, hql]
+[%unbreakable]
+----
+select publisher.name, leadAuthor.name
+from Publisher as publisher
+    join publisher.books as book
+    join book.authors leadAuthor
+where leadAuthor.name like :namePattern
+    and index(leadAuthor) = 0
+----
+
+
+[[implicit-collection-join]]
+==== Implicit joins involving collections
+
+A path expression like `book.authors.name` is not considered legal.
+We can't just navigate a many-valued association with this syntax.
+
+Instead, the functions `element()`, `index()`, `key()`, and `value()` may be applied to a path expression to express an implicit join.
+So we must write `element(book.authors).name` or `index(book.authors)`.
+ +[[collection-implicit-join-example]] +[source, hql] +---- +select book.title, element(book.authors).name, index(book.authors) +from Book book +---- + +An element of an indexed collection (an array, list, or map) may even be identified using the index operator: + +[[collection-index-operator-example]] +[source, hql] +---- +select publisher.name, book.authors[0].name +from Publisher as publisher + join publisher.books as book +where book.authors[0].name like :namePattern +---- diff --git a/documentation/src/main/asciidoc/querylanguage/Hibernate_Query_Language.adoc b/documentation/src/main/asciidoc/querylanguage/Hibernate_Query_Language.adoc new file mode 100644 index 000000000000..14e997703463 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/Hibernate_Query_Language.adoc @@ -0,0 +1,25 @@ +:shared-attributes-dir: ../shared/ + +include::{shared-attributes-dir}/common-attributes.adoc[] +include::{shared-attributes-dir}/url-attributes.adoc[] +include::{shared-attributes-dir}/filesystem-attributes.adoc[] +include::{shared-attributes-dir}/renderer-attributes.adoc[] + +:example-dir-model: {testing-project-dir}/src/main/java/org/hibernate/testing/orm/domain/userguide +:example-dir-hql: {core-project-dir}/src/test/java/org/hibernate/orm/test/hql +:extrasdir: extras + += A Guide to Hibernate Query Language +:toc: +:toclevels: 3 + +include::Preface.adoc[] + +:numbered: + +include::Concepts.adoc[] +include::Expressions.adoc[] +include::From.adoc[] +include::Relational.adoc[] +include::Credits.adoc[] + diff --git a/documentation/src/main/asciidoc/querylanguage/Preface.adoc b/documentation/src/main/asciidoc/querylanguage/Preface.adoc new file mode 100644 index 000000000000..4976e23c64d7 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/Preface.adoc @@ -0,0 +1,18 @@ +:shared-attributes-dir: ../shared/ + +include::{shared-attributes-dir}/url-attributes.adoc[] + +[[preface]] +== Preface + +Hibernate 6 is a major redesign of the world's most popular 
and feature-rich ORM solution. +The redesign has touched almost every subsystem of Hibernate, including the APIs, mapping annotations, and, above all else, the query language. + +This is the second time Hibernate Query Language has been completely reimplemented from scratch, but the first time in more than fifteen years. +In this new incarnation, HQL is far more powerful, and the HQL compiler much more robust. + +At long last, HQL has a feature set to match that of modern dialects of SQL, and is able to take full advantage of the power of modern SQL databases. + +This document is a reference guide to the full feature set of the language, and is the only up-to-date source for those who wish to learn how to write HQL effectively in Hibernate 6. + +If you are unfamiliar with Hibernate, be sure to first read link:{doc-introduction-url}[Introduction to Hibernate] or check out the link:{doc-quick-start-url}[Quick Start]. diff --git a/documentation/src/main/asciidoc/querylanguage/Relational.adoc b/documentation/src/main/asciidoc/querylanguage/Relational.adoc new file mode 100644 index 000000000000..d41e1a2ee1c6 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/Relational.adoc @@ -0,0 +1,988 @@ +[[selection-projection-aggregation]] +== Selection, projection, and aggregation + +Joining is one kind of _relational operation_. +It's an operation that produces relations (tables) from other relations. +Such operations, taken together, form the _relational algebra_. + +We must now understand the rest of this family: restriction a.k.a. selection, projection, aggregation, union/intersection, and, finally, ordering and limiting, operations which are not strictly part of the calculus of relations, but which usually come along for the ride because they're very _useful_. + +We'll start with the operation that's easiest to understand. 
+
+[[where-clause]]
+=== Restriction
+
+The `where` clause restricts the results returned by a `select` query or limits the scope of an `update` or `delete` query.
+
+NOTE: This operation is usually called _selection_, but since that term is often confused with the `select` keyword, and since both projection and selection involve "selecting" things, here we'll use the less-ambiguous term _restriction_.
+
+A restriction is nothing more than a single logical expression, a topic we exhausted above in <>.
+Therefore, we'll move quickly onto the next, and more interesting, operation.
+
+[[aggregation]]
+=== Aggregation
+
+An aggregate query is one with <> in its projection list.
+It collapses multiple rows into a single row.
+Aggregate queries are used for summarizing and analysing data.
+
+An aggregate query might have a `group by` clause.
+The `group by` clause divides the result set into groups, so that a query with aggregate functions in the select list returns not a single result for the whole query, but one result for each group.
+If an aggregate query _doesn't_ have a `group by` clause, it always produces a single row of results.
+
+NOTE: In short, _grouping_ controls the effect of _aggregation_.
+
+A query with aggregation may also have a `having` clause, a restriction applied to the groups.
+
+[[group-by]]
+==== Aggregation and grouping
+
+The `group by` clause looks quite similar to the `select` clause—it has a list of grouped items, but:
+
+- if there's just one item, then the query will have a single result for each unique value of that item, or
+- if there are multiple items, the query will have a result for each unique _combination_ of their values.
+ +The BNF for a grouped item is just: + +[[group-by-item-bnf]] +[source, antlrv4] +---- +include::{extrasdir}/group_by_item_bnf.txt[] +---- + +Consider the following queries: + +[source, hql] +[%unbreakable] +---- +select book.isbn, + sum(quantity) as totalSold, + sum(quantity * book.price) as totalBilled +from Item +where book.isbn = :isbn +---- + +[[group-by-example]] +[source, hql] +[%unbreakable] +---- +select book.isbn, + year(order.dateTime) as year, + sum(quantity) as yearlyTotalSold, + sum(quantity * book.price) as yearlyTotalBilled +from Item +where book.isbn = :isbn +group by year(order.dateTime) +---- + +The first query calculates complete totals over all orders in years. +The second calculates totals for each year, after grouping the orders by year. + +[[group-by-rollup-cube]] +==== Totals and subtotals + +The special functions `rollup()` and `cube()` may be used in the `group by` clause, when supported by the database. +The semantics are identical to SQL. + +These functions are especially useful for reporting. + +* A `group by` clause with `rollup()` is used to produce subtotals and grand totals. +* A `group by` clause with `cube()` allows totals for every combination of columns. + +[[having]] +==== Aggregation and restriction + +In a grouped query, the `where` clause applies to the non-aggregated values (it determines which rows will make it into the aggregation). +The `having` clause also restricts results, but it operates on the aggregated values. + +In an <>, we calculated totals for every year for which data was available. +But our dataset might extend far back into the past, perhaps even as far back as those terrible dark ages before Hibernate 2.0. 
+
+So let's restrict our result set to data from our own more civilized times:
+
+[[group-by-having-example]]
+[source, hql]
+[%unbreakable]
+----
+select book.isbn,
+ year(order.dateTime) as year,
+ sum(quantity) as yearlyTotalSold,
+ sum(quantity * book.price) as yearlyTotalBilled
+from Item
+where book.isbn = :isbn
+group by year(order.dateTime)
+having year(order.dateTime) > 2003
+ and sum(quantity) > 0
+----
+
+The `having` clause follows the same rules as the `where` clause and is also just a logical predicate.
+The `having` restriction is applied after grouping and aggregation has already been performed, whereas the `where` clause is applied before the data is grouped or aggregated.
+
+[[select-clause]]
+=== Projection
+
+The `select` list identifies which objects and values to return as the query results.
+This operation is called _projection_.
+
+[source,antlrv4]
+----
+selectClause
+ : "SELECT" "DISTINCT"? selection ("," selection)*
+----
+
+Any of the expression types discussed in <> may occur in the projection list, unless otherwise noted.
+
+[TIP]
+====
+If a query has no explicit `select` list, then, as we saw <>, the projection is inferred from the entities and joins occurring in the `from` clause, together with the result type specified by the call to `createQuery()`.
+But it's better to specify the projection explicitly, except in the simplest cases.
+====
+
+[[distinct]]
+==== Duplicate removal
+
+The `distinct` keyword helps remove duplicate results from the query result list.
+Its only effect is to add `distinct` to the generated SQL.
+ +[[distinct-projection-query-example]] +[source, hql] +---- +select distinct lastName from Person +---- +[source, hql] +---- +select distinct author +from Publisher as pub + join pub.books as book + join book.authors as author +where pub.id = :pid +---- + + +[NOTE] +==== +As of Hibernate 6, duplicate results arising from the use of `join fetch` are automatically removed by Hibernate in memory, _after_ reading the database results and materializing entity instances as Java objects. +It's no longer necessary to remove duplicate results explicitly, and, in particular, `distinct` should not be used for this purpose. +==== + +[[aggregate-functions]] +==== Aggregate functions + +It's common to have aggregate functions like `count()`, `sum()`, and `max()` in a select list. +Aggregate functions are special functions that reduce the size of the result set. + +The standard aggregate functions defined in both ANSI SQL and JPQL are these ones: + +[cols="30,~,~,^15"] +|=== +| Aggregate function | Argument type | Result type | JPA standard / ANSI SQL standard + +| `count()`, including `count(distinct)`, `count(all)`, and `count(*)` | Any | `Long` | ✔/✔ +| `avg()` | Any numeric type | `Double` | ✔/✔ +| `min()` | Any numeric type, or string | Same as the argument type | ✔/✔ +| `max()` | Any numeric type, or string | Same as the argument type | ✔/✔ +| `sum()` | Any numeric type | See table below | ✔/✔ +| `var_pop()`, `var_samp()` | Any numeric type | `Double` | ✖/✔ +| `stddev_pop()`, `stddev_samp()` | Any numeric type | `Double` | ✖/✔ +|=== + +[[aggregate-functions-example]] +[source, hql] +[%unbreakable] +---- +select count(distinct item.book) +from Item as item +where year(item.order.dateTime) = :year +---- +[source, hql] +[%unbreakable] +---- +select sum(item.quantity) as totalSales +from Item as item +where item.book.isbn = :isbn +---- +[source, hql] +[%unbreakable] +---- +select + year(item.order.dateTime) as year, + sum(item.quantity) as yearlyTotal +from Item as item 
+where item.book.isbn = :isbn +group by year(item.order.dateTime) +---- +[source, hql] +[%unbreakable] +---- +select + month(item.order.dateTime) as month, + avg(item.quantity) as monthlyAverage +from Item as item +where item.book.isbn = :isbn +group by month(item.order.dateTime) +---- + +In the case of `sum()`, the rules for assigning a result type are: +|=== +| Argument type | Result type + +| Any integral numeric type except `BigInteger` | `Long` +| Any floating point numeric type | `Double` +| `BigInteger` | `BigInteger` +| `BigDecimal` | `BigDecimal` +|=== + +HQL defines two additional aggregate functions which accept a logical predicate as an argument. + +[cols="30,~,~,^15"] +|=== +| Aggregate function | Argument type | Result type | JPA standard + +| `any()` | Logical predicate | `Boolean` | ✖ +| `every()` | Logical predicate | `Boolean` | ✖ +|=== + +We may write, for example, `every(p.amount < 1000.0)`. + +Below, we'll meet the <>. + +NOTE: Aggregate functions usually appear in the `select` clause, but control over aggregation is the responsibility of the `group by` clause, as described <>. 
+ +[[aggregate-functions-collections]] +==== Aggregate functions and collections + +The `elements()` and `indices()` functions we met <> let us apply aggregate functions to a collection: + +[cols="18,15,~,~"] +|=== +| New syntax | Legacy HQL function 💀 | Applies to | Purpose + +| `max(elements(x))` | `maxelement(x)` | Any collection with sortable elements | The maximum element or map value +| `min(elements(x))` | `minelement(x)` | Any collection with sortable elements | The minimum element or map value +| `sum(elements(x))` | — | Any collection with numeric elements | The sum of the elements or map values +| `avg(elements(x))` | — | Any collection with numeric elements | The average of the elements or map values +| `max(indices(x))` | `maxindex(x)` | Indexed collections (lists and maps) | The maximum list index or map key +| `min(indices(x))` | `minindex(x)` | Indexed collections (lists and maps) | The minimum list index or map key +| `sum(indices(x))` | — | Indexed collections (lists and maps) | The sum of the list indexes or map keys +| `avg(indices(x))` | — | Indexed collections (lists and maps) | The average of the list indexes or map keys +|=== + +These operations are mostly useful when working with ``@ElementCollection``s. 
+ +[[collection-expressions-example]] +[source, hql] +---- +select title, max(indices(authors))+1, max(elements(editions)) from Book +---- + +[[aggregate-functions-filter]] +==== Aggregate functions with restriction + +All aggregate functions support the inclusion of a _filter clause_, a sort of mini-`where` applying a restriction to just one item of the select list: + +[[aggregate-functions-filter-example]] +[source, hql] +[%unbreakable] +---- +select + year(item.order.dateTime) as year, + sum(item.quantity) filter (where not item.order.fulfilled) as unfulfilled, + sum(item.quantity) filter (where item.order.fulfilled) as fulfilled, + sum(item.quantity * item.book.price) filter (where item.order.paid) +from Item as item +where item.book.isbn = :isbn +group by year(item.order.dateTime) +---- + +The BNF for the `filter` clause is simple: + +[source,antlrv4] +---- +filterClause + : "FILTER" "(" "WHERE" predicate ")" +---- + +[[aggregate-functions-orderedset]] +==== Ordered set aggregate functions + +An _ordered set aggregate function_ is a special aggregate function which has: + +- not only an optional filter clause, as above, but also +- a `within group` clause containing a mini-`order by` specification. + +The BNF for `within group` is straightforward: + +[source,antlrv4] +---- +withinGroupClause + : "WITHIN" "GROUP" "(" "ORDER" "BY" sortSpecification ("," sortSpecification)* ")" +---- + +There are two main types of ordered set aggregate function: + +- an _inverse distribution function_ calculates a value that characterizes the distribution of values within the group, for example, `percentile_cont(0.5)` is the median, and `percentile_cont(0.25)` is the lower quartile. +- a _hypothetical set function_ determines the position of a "hypothetical" value within the ordered set of values. 
+ +The following ordered set aggregate functions are available on many platforms: + +[cols="30,~"] +|=== +| Type | Functions + +| Inverse distribution functions | `mode()`, `percentile_cont()`, `percentile_disc()` +| Hypothetical set functions | `rank()`, `dense_rank()`, `percent_rank()`, `cume_dist()` +| Other | `listagg()` +|=== + +This query calculates the median price of a book: + +[source, hql] +---- +select percentile_cont(0.5) + within group (order by price) +from Book +---- + +This query finds the percentage of books with prices less than 10 dollars: + +[source, hql] +---- +select 100 * percent_rank(10.0) + within group (order by price) +from Book +---- + +Actually, the most widely-supported ordered set aggregate function is one which builds a string by concatenating the values within a group. +This function has different names on different databases, but HQL abstracts these differences, and—following ANSI SQL—calls it `listagg()`. + +[[aggregate-functions-within-group-example]] +[source, hql] +[%unbreakable] +---- +select listagg(title, ', ') + within group (order by isbn) +from Book +group by element(authors) +---- + +This very useful function produces a string by concatenation of the aggregated values of its argument. + +[[aggregate-functions-window]] +==== Window functions + +A _window function_ is one which also has an `over` clause, for example: + +[source,hql] +[%unbreakable] +---- +select + item.order.dateTime, + sum(item.quantity) + over (order by item.order.dateTime) + as runningTotal +from Item item +---- + +This query returns a running total of sales over time. +That is, the `sum()` is taken over a window comprising the current row of the result set, together with all previous rows. 
+ +A window function application may optionally specify any of the following clauses: + +[cols="23,18,~"] +|=== +| Optional clause | Keyword | Purpose + +| _Partitioning_ of the result set | `partition by` | Very similar to `group by`, but doesn't collapse each partition to a single row +| _Ordering_ of the partition | `order by` | Specifies the order of rows within a partition +| _Windowing_ | `range`, `rows`, or `groups` | Defines the bounds of a window frame within a partition +| _Restriction_ | `filter` | As aggregate functions, window functions may optionally specify a filter +|=== + +For example, we may partition the running total by book: + +[source,hql] +---- +select + item.book.isbn, + item.order.dateTime, + sum(item.quantity) + over (partition by item.book + order by item.order.dateTime) + as runningTotal +from Item item +---- + +Every partition runs in isolation, that is, rows can't leak across partitions. + +The full syntax for window function application is amazingly involved, as shown by this BNF: + +[source,antlrv4] +---- +overClause + : "OVER" "(" partitionClause? orderByClause? frameClause? ")" + +partitionClause + : "PARTITION" "BY" expression ("," expression)* + +frameClause + : ("RANGE"|"ROWS"|"GROUPS") frameStart frameExclusion? + | ("RANGE"|"ROWS"|"GROUPS") "BETWEEN" frameStart "AND" frameEnd frameExclusion? + +frameStart + : "CURRENT" "ROW" + | "UNBOUNDED" "PRECEDING" + | expression "PRECEDING" + | expression "FOLLOWING" + +frameEnd + : "CURRENT" "ROW" + | "UNBOUNDED" "FOLLOWING" + | expression "PRECEDING" + | expression "FOLLOWING" + +frameExclusion + : "EXCLUDE" "CURRENT" "ROW" + | "EXCLUDE" "GROUP" + | "EXCLUDE" "TIES" + | "EXCLUDE" "NO" "OTHERS" +---- + +Window functions are similar to aggregate functions in the sense that they compute some value based on a "frame" comprising multiple rows. +But unlike aggregate functions, window functions don't flatten rows within a window frame. 
+ +[discrete] +===== Window frames + +The _window frame_ is the set of rows within a given partition that is passed to the window function. +There's a different window frame for each row of the result set. +In our example, the window frame comprised all the preceding rows within the partition, that is, all the rows with the same `item.book` and with an earlier `item.order.dateTime`. + +The boundary of the window frame is controlled via the windowing clause, which may specify one of the following modes: + +[cols="8,40,20,~"] +|=== +| Mode | Definition | Example | Interpretation + +|`rows` | Frame bounds defined by a given number of rows | `rows 5 preceding` | The previous 5 rows in the partition +| `groups` | Frame bounds defined by a given number of _peer groups_, rows belonging to the same peer group if they are assigned the same position by `order by` | `groups 5 preceding` | The rows in the previous 5 peer groups in the partition +| `range` | Frame bounds defined by a maximum difference in _value_ of the expression used to `order by` | `range between 1.0 preceding and 1.0 following` | The rows whose `order by` expression differs by a maximum absolute value of `1.0` from the current row +|=== + +The frame exclusion clause allows excluding rows around the current row: + +[cols="20,~"] +|=== +| Option | Interpretation + +| `exclude current row` | Excludes the current row +| `exclude group` | Excludes rows of the peer group of the current row +| `exclude ties` | Excludes rows of the peer group of the current row except the current row +| `exclude no others` | The default, does not exclude anything +|=== + +By default, the window frame is defined as `rows between unbounded preceding and current row exclude no others`, meaning every row up to and including the current row. + +[IMPORTANT] +==== +The modes `range` and `groups`, along with frame exclusion modes, are not available on every database. 
+==== +[discrete] +===== Widely supported window functions + +The following window functions are available on all major platforms: + +[cols="15,~,30"] +|=== +| Window function | Purpose | Signature + +| `row_number()` | The position of the current row within its frame | `row_number()` +| `lead()` | The value of a subsequent row in the frame | `lead(x)`, `lead(x, i, x)` +| `lag()` | The value of a previous row in the frame | `lag(x)`, `lag(x, i, x)` +| `first_value()` | The value of a first row in the frame | `first_value(x)` +| `last_value()` | The value of a last row in the frame | `last_value(x)` +| `nth_value()` | The value of the `n`th row in the frame | `nth_value(x, n)` +|=== + +In principle every aggregate or ordered set aggregate function might also be used as a window function, just by specifying `over`, but not every function is supported on every database. + +[IMPORTANT] +==== +Window functions and ordered set aggregate functions aren't available on every database. +Even where they are available, support for particular features varies widely between databases. +Therefore, we won't waste time going into further detail here. +For more information about the syntax and semantics of these functions, consult the documentation for your dialect of SQL. +==== + +[[set-operators]] +=== Operations on result sets + +These operators apply not to expressions, but to entire result sets: + +- `union` and `union all`, +- `intersect` and `intersect all`, and +- `except` and `except all`. + +Just like in SQL, `all` suppresses the elimination of duplicate results. + +[[union-example]] +[source, hql] +---- +select nomDePlume from Author where nomDePlume is not null +union +select name from Person +---- + +[[order-by]] +=== Sorting + +By default, the results of the query are returned in an arbitrary order. + +[NOTE] +==== +Imposing an order on a set is called _sorting_. 
+ +A relation (a database table) is a set, and therefore certain particularly dogmatic purists have argued that sorting has no place in the algebra of relations. +We think this is more than a bit silly: practical data analysis almost always involves sorting, which is a perfectly well-defined operation. +==== + +The `order by` clause specifies a list of projected items used to sort the results. +Each sorted item may be: + +- an attribute of an entity or embeddable class, +- a more complex <>, +- the alias of a projected item declared in the select list, or +- a literal integer indicating the ordinal position of a projected item in the select list. + +Of course, in principle, only certain types may be sorted: numeric types, string, and date and time types. +But HQL is very permissive here and will allow an expression of almost any type to occur in a sort list. +Even the identification variable of an entity with a sortable identifier type may occur as a sorted item. + +[NOTE] +==== +The JPQL specification requires that every sorted item in the `order by` clause also occur in the `select` clause. +HQL does not enforce this restriction, but applications desiring database portability should be aware that some databases _do_. + +Therefore, you might wish to avoid the use of complex expressions in the sort list. +==== + +The BNF for a sorted item is: + +[[order-by-item-bnf]] +[source, antlrv4] +---- +include::{extrasdir}/order_by_item_bnf.txt[] +---- + +Each sorted item listed in the `order by` clause may explicitly specify a direction, either: + +- `asc` for ascending order, or +- `desc` for descending order. + +If no direction is explicitly specified, the results are returned in ascending order. + +Of course, there's an ambiguity with respect to null values. 
+Therefore, the sorting of null values may be explicitly specified: + +[cols="20,~"] +|=== +| Precedence | Interpretation + +| `nulls first` | Puts null values at the beginning of the result set +| `nulls last` | Puts them at the end +|=== + +[[order-by-example]] +[source, hql] +---- +select title, publisher.name +from Book +order by title, publisher.name nulls last +---- +[source, hql] +---- +select book.isbn, + year(order.dateTime) as year, + sum(quantity) as yearlyTotalSold, + sum(quantity * book.price) as yearlyTotalBilled +from Item +where book.isbn = :isbn +group by year(order.dateTime) +having year(order.dateTime) > 2000 + and sum(quantity) > 0 +order by yearlyTotalSold desc, year desc +---- + +Queries with an ordered result list may have limits or pagination. + +[[limit-offset]] +==== Limits and offsets + +It's often useful to place a hard upper limit on the number of results that may be returned by a query. +The `limit` and `offset` clauses are an alternative to the use of `setMaxResults()` and `setFirstResult()` respectively, +and may similarly be used for pagination. + +[TIP] +==== +If the `limit` or `offset` is parameterized, it's much easier to use `setMaxResults()` or `setFirstResult()`. 
+==== + +The SQL `fetch` syntax is supported as an alternative: + +[cols="25,45,~"] +|=== +| Short form | Verbose form | Purpose + +| `limit 10` | `fetch first 10 rows only` | Limit result set +| `limit 10 offset 20` | `offset 20 rows fetch next 10 rows only` | Paginate result set +|=== + +The BNF gets a bit complicated: + +[[limit-offset-bnf]] +[source, antlrv4] +---- +include::{extrasdir}/limit_offset_bnf.txt[] +---- + +These two queries are identical: + +[[limit-example]] +[source, hql] +---- +select title from Book +order by title, published desc +limit 50 +---- +[source, hql] +---- +select title from Book +order by title, published desc +fetch first 50 rows only +---- + +These are well-defined limits: the number of results returned by the database will be limited to 50, as promised. +But not every query is quite so well-behaved. + +[NOTE] +==== +_Limiting_ certainly _isn't_ a well-defined relational operation, and must be used with care. + +In particular, limits don't play well with <>. +==== + +This next query is accepted by HQL, and no more than 50 results are returned by `getResultList()`, just as expected: + +[[bad-limit-example]] +[source, hql] +---- +select title from Book + join fetch authors +order by title, published desc +limit 50 +---- +However, if you log the SQL executed by Hibernate, you'll notice something wrong: + +[source, sql] +---- +select + b1_0.isbn, + a1_0.books_isbn, + a1_0.authors_ORDER, + a1_1.id, + a1_1.bio, + a1_1.name, + a1_1.person_id, + b1_0.price, + b1_0.published, + b1_0.publisher_id, + b1_0.title +from + Book b1_0 +join + (Book_Author a1_0 + join + Author a1_1 + on a1_1.id=a1_0.authors_id) + on b1_0.isbn=a1_0.books_isbn +order by + b1_0.title, + b1_0.published desc +---- + +What happened to the `limit` clause? + +[%unbreakable] +[IMPORTANT] +==== +When limits or pagination are combined with a fetch join, Hibernate must retrieve all matching results from the database and _apply the limit in memory_! 
+ +This _almost certainly_ isn't the behavior you were hoping for, and in general will exhibit _terrible_ performance characteristics. +==== + +[[with-cte]] +=== Common table expressions + +A _common table expression_ or CTE may be thought of as a sort of named subquery. +Any query with an uncorrelated subquery can in principle be rewritten so that the subquery occurs in the `with` clause. + +But CTEs have capabilities that subqueries don't have. +The `with` clause lets us: + +- specify materialization hints, and +- write recursive queries. + +On databases which don't support CTEs natively, Hibernate attempts to rewrite any HQL query with CTEs as a SQL query with subqueries. +This is impossible for recursive queries, unfortunately. + +Let's take a quick look at the BNF: + +[source,antlrv4] +[%unbreakable] +---- +withClause + : "WITH" cte ("," cte)* + +cte + : identifier AS ("NOT"? "MATERIALIZED")? "(" queryExpression ")" + searchClause? cycleClause? +---- + +The `with` clause comes right at the start of a query. +It may declare multiple CTEs with different names. + +[source, hql] +[%unbreakable] +---- +with + paid as ( + select ord.id as oid, sum(payment.amount) as amountPaid + from Order as ord + left join ord.payments as payment + group by ord + having local datetime - ord.dateTime < 365 day + ), + owed as ( + select ord.id as oid, sum(item.quantity*item.book.price) as amountOwed + from Order as ord + left join ord.items as item + group by ord + having local datetime - ord.dateTime < 365 day + ) +select id, paid.amountPaid, owed.amountOwed +from Order +where paid.amountPaid < owed.amountOwed + and paid.oid = id and owed.oid = id +---- + +Notice that if we rewrote this query using subqueries, it would look quite a lot clumsier. + +[[materialization-hints]] +==== Materialization hints + +The `materialized` keyword is a hint to the database that the subquery should be separately executed and its results stored in a temporary table. 
+
+On the other hand, its nemesis, `not materialized`, is a hint that the subquery should be inlined at each use site, with each usage optimized independently.
+
+[CAUTION]
+====
+The precise impact of materialization hints is quite platform-dependent.
+====
+
+Our example query from above hardly changes.
+We just add `materialized` to the CTE declarations.
+
+[[cte-materialized-example]]
+[source, hql]
+[%unbreakable]
+----
+with
+ paid as materialized (
+ select ord.id as oid, sum(payment.amount) as amountPaid
+ from Order as ord
+ left join ord.payments as payment
+ group by ord
+ having local datetime - ord.dateTime < 365 day
+ ),
+ owed as materialized (
+ select ord.id as oid, sum(item.quantity*item.book.price) as amountOwed
+ from Order as ord
+ left join ord.items as item
+ group by ord
+ having local datetime - ord.dateTime < 365 day
+ )
+select id, paid.amountPaid, owed.amountOwed
+from Order
+where paid.amountPaid < owed.amountOwed
+ and paid.oid = id and owed.oid = id
+----
+
+[[recursive-queries]]
+==== Recursive queries
+
+A _recursive query_ is one where the CTE is defined self-referentially.
+Recursive queries follow a very particular pattern.
+The CTE is defined as a union of:
+
+- a base subquery returning an initial set of rows where the recursion begins,
+- a recursively-executed subquery which returns additional rows by joining against the CTE itself.
+
+Let's demonstrate this with an example.
+
+First we'll need some sort of tree-like entity:
+
+[source,java]
+[%unbreakable]
+----
+@Entity
+class Node {
+ @Id Long id;
+ String text;
+ @ManyToOne Node parent;
+}
+----
+
+We may obtain a tree of ``Node``s with the following recursive query:
+
+[[cte-recursive-example]]
+[source, hql]
+[%unbreakable]
+----
+with Tree as (
+ /* base query */
+ select root.id as id, root.text as text, 0 as level
+ from Node root
+ where root.parent is null
+ union all
+ /* recursion */
+ select child.id as id, child.text as text, level+1 as level
+ from Tree parent
+ join Node child on child.parent.id = parent.id
+)
+select text, level
+from Tree
+----
+
+When querying a tree-like data structure, the base subquery usually returns the root node or nodes.
+The recursively-executed subquery returns the children of the current set of nodes.
+It's executed repeatedly with the results of the previous execution.
+Recursion terminates when the recursively-executed subquery returns no new nodes.
+
+[CAUTION]
+====
+Hibernate cannot emulate recursive queries on databases which don't support them natively.
+====
+
+Now, if a graph contains cycles, that is, if it isn't a tree, the recursion might never terminate.
+
+==== Cycle detection
+
+The `cycle` clause enables cycle detection, and aborts the recursion if a node is encountered twice.
+
+[[cte-cycle-example]]
+[source, hql]
+[%unbreakable]
+----
+with Tree as (
+ /* base query */
+ select root.id as id, root.text as text, 0 as level
+ from Node root
+ where root.parent is null
+ union all
+ /* recursion */
+ select child.id as id, child.text as text, level+1 as level
+ from Tree parent
+ join Node child on child.parent.id = parent.id
+) cycle id set abort to 'aborted!' default '' /* cycle detection */
+select text, level, abort
+from Tree
+order by level
+----
+
+Here:
+
+- the `id` column is used to detect cycles, and
+- the `abort` column is set to the string value `'aborted!'` if a cycle is detected.
+ +Hibernate emulates the `cycle` clause on databases which don't support it natively. + +The BNF for `cycle` is: + +[[cte-recursive-cycle-bnf-example]] +[source, antlrv4] +[%unbreakable] +---- +cycleClause + : "CYCLE" identifier ("," identifier)* + "SET" identifier ("TO" literal "DEFAULT" literal)? + ("USING" identifier)? +---- + +The column optionally specified by `using` holds the path to the current row. + +==== Ordering depth-first or breadth-first + +The `search` clause allows us to control whether we would like the results of our query returned in an order that emulates a depth-first recursive search, or a breadth-first recursive search. + +In our query above, we explicitly coded a `level` column that holds the recursion depth, and ordered our result set according to this depth. +With the `search` clause, that bookkeeping is already taken care of for us. + +For depth-first search, we have: + +[source, hql] +[%unbreakable] +---- +with Tree as ( + /* base query */ + select root.id as id, root.text as text + from Node root + where root.parent is null + union all + /* recursion */ + select child.id as id, child.text as text + from Tree parent + join Node child on child.parent.id = parent.id +) search depth first by id set level /* depth-first search */ +from Tree +select text +order by level +---- + +And for breadth-first search, we only need to change a single keyword: + +[source, hql] +[%unbreakable] +---- +with Tree as ( + /* base query */ + select root.id as id, root.text as text + from Node root + where root.parent is null + union all + /* recursion */ + select child.id as id, child.text as text + from Tree parent + join Node child on child.parent.id = parent.id +) search breadth first by id set level /* breadth-first search */ +from Tree +select text +order by level desc +---- + +Hibernate emulates the `search` clause on databases which don't support it natively. 
+ +The BNF for `search` is: + +[[cte-recursive-search-bnf-example]] +[source, antlrv4] +[%unbreakable] +---- +searchClause + : "SEARCH" ("BREADTH"|"DEPTH") "FIRST" + "BY" searchSpecifications + "SET" identifier + +searchSpecifications + : searchSpecification ("," searchSpecification)* + +searchSpecification + : identifier sortDirection? nullsPrecedence? +---- diff --git a/documentation/src/main/asciidoc/querylanguage/extras/group_by_item_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/group_by_item_bnf.txt new file mode 100644 index 000000000000..699918b36c05 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/group_by_item_bnf.txt @@ -0,0 +1 @@ +identifier | INTEGER_LITERAL | expression \ No newline at end of file diff --git a/documentation/src/main/asciidoc/querylanguage/extras/hql-distinct-projection-query-example.sql b/documentation/src/main/asciidoc/querylanguage/extras/hql-distinct-projection-query-example.sql new file mode 100644 index 000000000000..ce9cd996aa96 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/hql-distinct-projection-query-example.sql @@ -0,0 +1,4 @@ +select distinct + p.last_name +from + person p diff --git a/documentation/src/main/asciidoc/querylanguage/extras/hql-read-only-entities-example.sql b/documentation/src/main/asciidoc/querylanguage/extras/hql-read-only-entities-example.sql new file mode 100644 index 000000000000..8d359df31d95 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/hql-read-only-entities-example.sql @@ -0,0 +1,12 @@ +select + c.id, + c.duration, + c.phone_id, + c.call_timestamp +from + phone_call c +join + Phone p + on p.id=c.phone_id +where + p.phone_number='123-456-7890' diff --git a/documentation/src/main/asciidoc/querylanguage/extras/limit_offset_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/limit_offset_bnf.txt new file mode 100644 index 000000000000..7546411d27b4 --- /dev/null +++ 
b/documentation/src/main/asciidoc/querylanguage/extras/limit_offset_bnf.txt @@ -0,0 +1,11 @@ +limitClause + : "LIMIT" parameterOrIntegerLiteral + +offsetClause + : "OFFSET" parameterOrIntegerLiteral ("ROW" | "ROWS")? + +fetchClause + : "FETCH" ("FIRST" | "NEXT") + (parameterOrIntegerLiteral | parameterOrNumberLiteral "%") + ("ROW" | "ROWS") + ("ONLY" | "WITH" "TIES") diff --git a/documentation/src/main/asciidoc/querylanguage/extras/order_by_item_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/order_by_item_bnf.txt new file mode 100644 index 000000000000..9b865476bc48 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/order_by_item_bnf.txt @@ -0,0 +1,10 @@ +sortExpression sortDirection? nullsPrecedence? + +sortExpression + : identifier | INTEGER_LITERAL | expression + +sortDirection + : "ASC" | "DESC" + +nullsPrecedence + : "NULLS" ("FIRST" | "LAST") diff --git a/documentation/src/main/asciidoc/querylanguage/extras/predicate_in_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/predicate_in_bnf.txt new file mode 100644 index 000000000000..a4cee8358837 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/predicate_in_bnf.txt @@ -0,0 +1,7 @@ +expression "NOT"? "IN" inList + +inList + : collectionQuantifier "(" simplePath ")" + | "(" (expression ("," expression)*)? ")" + | "(" subquery ")" + | parameter diff --git a/documentation/src/main/asciidoc/querylanguage/extras/predicate_like_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/predicate_like_bnf.txt new file mode 100644 index 000000000000..c9e676602390 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/predicate_like_bnf.txt @@ -0,0 +1 @@ +expression "NOT"? ("LIKE" | "ILIKE") expression ("ESCAPE" character)? 
diff --git a/documentation/src/main/asciidoc/querylanguage/extras/searched_case_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/searched_case_bnf.txt new file mode 100644 index 000000000000..8323b04c750d --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/searched_case_bnf.txt @@ -0,0 +1 @@ +"CASE" ("WHEN" predicate "THEN" expression)+ ("ELSE" expression)? "END" \ No newline at end of file diff --git a/documentation/src/main/asciidoc/querylanguage/extras/select_item_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/select_item_bnf.txt new file mode 100644 index 000000000000..fad7561f50af --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/select_item_bnf.txt @@ -0,0 +1,8 @@ +selection + : (expression | instantiation) alias? + +instantiation + : "NEW" instantiationTarget "(" selection ("," selection)* ")" + +alias + : "AS"? IDENTIFIER \ No newline at end of file diff --git a/documentation/src/main/asciidoc/querylanguage/extras/simple_case_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/simple_case_bnf.txt new file mode 100644 index 000000000000..4230ec3f7821 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/simple_case_bnf.txt @@ -0,0 +1 @@ +"CASE" expression ("WHEN" expression "THEN" expression)+ ("ELSE" expression)? "END" \ No newline at end of file diff --git a/documentation/src/main/asciidoc/querylanguage/extras/statement_delete_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/statement_delete_bnf.txt new file mode 100644 index 000000000000..89d11a67b09f --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/statement_delete_bnf.txt @@ -0,0 +1,2 @@ +deleteStatement + : "DELETE" "FROM"? targetEntity whereClause? 
diff --git a/documentation/src/main/asciidoc/querylanguage/extras/statement_insert_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/statement_insert_bnf.txt new file mode 100644 index 000000000000..4f2e4caf90a8 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/statement_insert_bnf.txt @@ -0,0 +1,14 @@ +insertStatement + : "INSERT" "INTO"? targetEntity targetFields (queryExpression | valuesList) + +targetEntity + : entityName variable? + +targetFields + : "(" simplePath ("," simplePath)* ")" + +valuesList + : "VALUES" values ("," values)* + +values + : "(" expression ("," expression)* ")" diff --git a/documentation/src/main/asciidoc/querylanguage/extras/statement_select_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/statement_select_bnf.txt new file mode 100644 index 000000000000..307a27693619 --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/statement_select_bnf.txt @@ -0,0 +1,36 @@ +selectStatement + : queryExpression + +queryExpression + : withClause? orderedQuery (setOperator orderedQuery)* + +orderedQuery + : (query | "(" queryExpression ")") queryOrder? + +query + : selectClause fromClause? whereClause? (groupByClause havingClause?)? + | fromClause whereClause? (groupByClause havingClause?)? selectClause? + +queryOrder + : orderByClause limitClause? offsetClause? fetchClause? + +fromClause + : "FROM" entityWithJoins ("," entityWithJoins)* + +entityWithJoins + : fromRoot (join | crossJoin | jpaCollectionJoin)* + +fromRoot + : entityName variable? + | "LATERAL"? "(" subquery ")" variable? + +join + : joinType "JOIN" "FETCH"? joinTarget joinRestriction? + +joinTarget + : path variable? + | "LATERAL"? "(" subquery ")" variable? 
+ +withClause + : "WITH" cte ("," cte)* + ; diff --git a/documentation/src/main/asciidoc/querylanguage/extras/statement_update_bnf.txt b/documentation/src/main/asciidoc/querylanguage/extras/statement_update_bnf.txt new file mode 100644 index 000000000000..065bf853645e --- /dev/null +++ b/documentation/src/main/asciidoc/querylanguage/extras/statement_update_bnf.txt @@ -0,0 +1,11 @@ +updateStatement + : "UPDATE" "VERSIONED"? targetEntity setClause whereClause? + +targetEntity + : entityName variable? + +setClause + : "SET" assignment ("," assignment)* + +assignment + : simplePath "=" expression diff --git a/documentation/src/main/asciidoc/quickstart/guides/credits.adoc b/documentation/src/main/asciidoc/quickstart/guides/credits.adoc new file mode 100644 index 000000000000..54cb14d01a5f --- /dev/null +++ b/documentation/src/main/asciidoc/quickstart/guides/credits.adoc @@ -0,0 +1,9 @@ +[[credits]] +== Credits + +The full list of contributors to Hibernate ORM can be found on the +https://github.com/hibernate/hibernate-orm/graphs/contributors[GitHub repository]. 
+ +The following contributors were involved in this documentation: + +* Steve Ebersole \ No newline at end of file diff --git a/documentation/src/main/asciidoc/quickstart/guides/index.adoc b/documentation/src/main/asciidoc/quickstart/guides/index.adoc index 12d333ae7f21..753eb12fc9f3 100644 --- a/documentation/src/main/asciidoc/quickstart/guides/index.adoc +++ b/documentation/src/main/asciidoc/quickstart/guides/index.adoc @@ -1,16 +1,24 @@ -= Hibernate Getting Started Guide +:shared-attributes-dir: ../../shared/ + +include::{shared-attributes-dir}/common-attributes.adoc[] +include::{shared-attributes-dir}/url-attributes.adoc[] +include::{shared-attributes-dir}/filesystem-attributes.adoc[] +include::{shared-attributes-dir}/renderer-attributes.adoc[] + += Getting Started with Hibernate +:description: Quick start guide for Hibernate ORM :toc: +:toclevels: 3 :docinfo: include::preface.adoc[] :numbered: -include::obtaining.adoc[] -include::tutorial_native.adoc[] +include::obtaining.adoc[] include::tutorial_annotations.adoc[] - include::tutorial_jpa.adoc[] - include::tutorial_envers.adoc[] + +include::credits.adoc[] diff --git a/documentation/src/main/asciidoc/quickstart/guides/obtaining.adoc b/documentation/src/main/asciidoc/quickstart/guides/obtaining.adoc index 621bc6919bcc..d191ee7e21d7 100644 --- a/documentation/src/main/asciidoc/quickstart/guides/obtaining.adoc +++ b/documentation/src/main/asciidoc/quickstart/guides/obtaining.adoc @@ -1,54 +1,116 @@ [[obtaining]] == Obtaining Hibernate -=== The Hibernate Modules/Artifacts - -Hibernate's functionality is split into a number of modules/artifacts meant to isolate dependencies (modularity). - -hibernate-core:: The main (core) Hibernate module. Defines its ORM features and APIs as well as the various integration SPIs. 
-hibernate-envers:: Hibernate's historical entity versioning feature -hibernate-spatial:: Hibernate's Spatial/GIS data-type support -hibernate-agroal:: Integrates the https://agroal.github.io/[Agroal] connection pooling library into Hibernate -hibernate-c3p0:: Integrates the https://www.mchange.com/projects/c3p0/[C3P0] connection pooling library into Hibernate -hibernate-hikaricp:: Integrates the https://github.com/brettwooldridge/HikariCP/[HikariCP] connection pooling library into Hibernate -hibernate-vibur:: Integrates the https://www.vibur.org/[Vibur DBCP] connection pooling library into Hibernate -hibernate-proxool:: Integrates the https://proxool.sourceforge.net/[Proxool] connection pooling library into Hibernate -hibernate-jcache:: Integrates the https://jcp.org/en/jsr/detail?id=107$$[JCache] caching specification into Hibernate, -enabling any compliant implementation to become a second-level cache provider. -hibernate-community-dialects:: Hibernate's community supported dialects -hibernate-graalvm:: Experimental extension to make it easier to compile applications into a https://www.graalvm.org/[GraalVM] native image -hibernate-micrometer:: Integration for Micrometer metrics into Hibernate as a metrics collection package -hibernate-testing:: Support for testing Hibernate ORM functionality -hibernate-integrationtest-java-modules:: Integration tests for running Hibernate ORM in the Java module path - -=== Release Bundle Downloads - -The Hibernate team provides release bundles hosted on the SourceForge File Release System, in both -`TGZ` and `ZIP` formats. Each release bundle contains `JAR` files, documentation, source code, and other goodness. - -You can download releases of Hibernate, in your chosen format, from the list at -https://sourceforge.net/projects/hibernate/files/hibernate-orm/. The release bundle is structured as follows: - -* The `lib/required/` directory contains the `hibernate-core` jar and all of its dependencies. 
All of these jars are -required to be available on your classpath no matter which features of Hibernate are being used. -* The `lib/envers` directory contains the `hibernate-envers` jar and all of its dependencies (beyond those in -`lib/required/` and `lib/jpa/`). -* The `lib/spatial/` directory contains the `hibernate-spatial` jar and all of its dependencies (beyond those in `lib/required/`) -* The `lib/jpa-metamodel-generator/` directory contains the jar needed for generating the Criteria API type-safe Metamodel. -* The `lib/optional/` directory contains the jars needed for the various connection pooling and second-level cache integrations -provided by Hibernate, along with their dependencies. - -=== Maven Repository Artifacts - -The authoritative repository for Hibernate artifacts is the JBoss Maven repository. The Hibernate artifacts are -synced to Maven Central as part of an automated job (some small delay may occur). - -The team responsible for the JBoss Maven repository maintains a number of Wiki pages that contain important information: - -* https://community.jboss.org/docs/DOC-14900 - General information about the repository. -* https://community.jboss.org/docs/DOC-15170 - Information about setting up the JBoss repositories in order to do -development work on JBoss projects themselves. -* https://community.jboss.org/docs/DOC-15169 - Information about setting up access to the repository to use JBoss -projects as part of your own software. - -The Hibernate ORM artifacts are published under the `org.hibernate` groupId. \ No newline at end of file +Hibernate is broken into a number of modules/artifacts under the `org.hibernate.orm` +group. The main artifact is named `hibernate-core`. + +[NOTE] +==== +This guide uses 6.2.0.Final as the Hibernate version for illustration purposes. Be sure to change +this version, if necessary, to the version you wish to use. 
+==== + +We can declare a dependency on this artifact using https://www.gradle.org[Gradle] + +[source,groovy] +---- +dependencies { + implementation "org.hibernate.orm:hibernate-core:6.2.0.Final" +} +---- + +or https://maven.org[Maven]: + +[source,xml] +---- + + org.hibernate.orm + hibernate-core + 6.2.0.Final + +---- + +[[modules]] +=== Hibernate ORM modules + +As mentioned earlier, Hibernate ORM is broken into a number of modules with the intent of isolating +transitive dependencies based on the features being used or not. + +[cols="40m,~"] +.API-oriented modules +|=== +|hibernate-core| The core object/relational mapping engine +|hibernate-envers| Entity versioning and auditing +|hibernate-spatial| Support for spatial/GIS data types using https://github.com/GeoLatte/geolatte-geom[GeoLatte] +|hibernate-jpamodelgen| An annotation processor that generates a JPA-compliant metamodel, plus optional Hibernate extras +|=== + +[cols="40m,~"] +.Integration-oriented modules +|=== +|hibernate-agroal| Support for https://agroal.github.io/[Agroal] connection pooling +|hibernate-c3p0| Support for https://www.mchange.com/projects/c3p0/[C3P0] connection pooling +|hibernate-hikaricp| Support for https://github.com/brettwooldridge/HikariCP/[HikariCP] connection pooling +|hibernate-vibur| Support for https://www.vibur.org/[Vibur DBCP] connection pooling +|hibernate-proxool| Support for https://proxool.sourceforge.net/[Proxool] connection pooling +|hibernate-jcache| Integration with https://jcp.org/en/jsr/detail?id=107$$[JCache], allowing any compliant implementation as a second-level cache provider +|hibernate-graalvm| Experimental extension to make it easier to compile applications as a https://www.graalvm.org/[GraalVM] native image +|hibernate-micrometer| Integration with https://micrometer.io[Micrometer] metrics +|hibernate-community-dialects| Additional community-supported SQL dialects +|=== + +[cols="40m,~"] +.Testing-oriented modules +|=== +|hibernate-testing| A series of 
JUnit extensions for testing Hibernate ORM functionality +|=== + +[[platform]] +=== Platform / BOM + +Hibernate also provides a platform (BOM in Maven terminology) module which can be used to align versions of the Hibernate modules along with the versions of its libraries. The platform artifact is named `hibernate-platform`. + +To apply the platform in Gradle + +[source,groovy] +---- +dependencies { + implementation platform "org.hibernate.orm:hibernate-platform:6.2.0.Final" + + // use the versions from the platform + implementation "org.hibernate.orm:hibernate-core" + implementation "jakarta.transaction:jakarta.transaction-api" +} +---- + +See the https://docs.gradle.org/current/userguide/java_platform_plugin.html#sec:java_platform_consumption[Gradle documentation] for capabilities of applying a platform. + +To apply the platform (BOM) in Maven + +[source,xml] +---- + + org.hibernate.orm + hibernate-core + + + jakarta.transaction + jakarta.transaction-api + + + + + org.hibernate.orm + hibernate-platform + 6.2.0.Final + pom + import + + +---- + +[[examples]] +=== Example sources +The bundled examples mentioned in this tutorial can be downloaded from link:{doc-quick-start-url}hibernate-tutorials.zip[here]. + +Alternatively, the example source code can also be obtained from https://github.com/hibernate/hibernate-orm/tree/{fullVersion}/documentation/src/main/asciidoc/quickstart/tutorials[Github] \ No newline at end of file diff --git a/documentation/src/main/asciidoc/quickstart/guides/preface.adoc b/documentation/src/main/asciidoc/quickstart/guides/preface.adoc index 03bc59fd1504..b44b21b3538c 100644 --- a/documentation/src/main/asciidoc/quickstart/guides/preface.adoc +++ b/documentation/src/main/asciidoc/quickstart/guides/preface.adoc @@ -1,38 +1,12 @@ -[[preface]] - -[preface] -== Preface - -Working with both Object-Oriented software and Relational Databases can be cumbersome and time-consuming. 
-Development costs are significantly higher due to a number of "paradigm mismatches" between how data is represented in objects -versus relational databases. Hibernate is an Object/Relational Mapping (ORM) solution for Java environments. The -term Object/Relational Mapping refers to the technique of mapping data between an object model representation to -a relational data model representation. See https://en.wikipedia.org/wiki/Object-relational_mapping for a good -high-level discussion. Also, Martin Fowler's link:$$https://martinfowler.com/bliki/OrmHate.html$$[OrmHate] article -takes a look at many of the mismatch problems. +:shared-attributes-dir: ../../shared/ -Although having a strong background in SQL is not required to use Hibernate, having a basic understanding of the -concepts can help you understand Hibernate more quickly and fully. An understanding of data modeling principles -is especially important. Both https://www.agiledata.org/essays/dataModeling101.html and -https://en.wikipedia.org/wiki/Data_modeling are good starting points for understanding these data modeling -principles. If you are completely new to database access in Java, -https://www.marcobehler.com/guides/a-guide-to-accessing-databases-in-java contains a good overview of the various parts, -pieces and options. +include::{shared-attributes-dir}/url-attributes.adoc[] +include::{shared-attributes-dir}/filesystem-attributes.adoc[] -Hibernate takes care of the mapping from Java classes to database tables, and from Java data types to SQL data -types. In addition, it provides data query and retrieval facilities. It can significantly reduce development -time otherwise spent with manual data handling in SQL and JDBC. Hibernate’s design goal is to relieve the -developer from 95% of common data persistence-related programming tasks by eliminating the need for manual, -hand-crafted data processing using SQL and JDBC. 
However, unlike many other persistence solutions, Hibernate -does not hide the power of SQL from you and guarantees that your investment in relational technology and -knowledge is as valid as always. - -Hibernate may not be the best solution for data-centric applications that only use stored-procedures to -implement the business logic in the database, it is most useful with object-oriented domain models and business -logic in the Java-based middle-tier. However, Hibernate can certainly help you to remove or encapsulate -vendor-specific SQL code and streamlines the common task of translating result sets from a tabular -representation to a graph of objects. +[[preface]] +== Preface -See https://hibernate.org/orm/contribute/ for information on getting involved. +Hibernate is an _Object/Relational Mapping_ (ORM) solution for programs written in Java and other JVM +languages. -IMPORTANT: The projects and code for the tutorials referenced in this guide are available as link:hibernate-tutorials.zip[] +include::{shared-attributes-dir}/background.adoc[] diff --git a/documentation/src/main/asciidoc/quickstart/guides/tutorial_annotations.adoc b/documentation/src/main/asciidoc/quickstart/guides/tutorial_annotations.adoc index f92f90e2564a..f91f2f39518b 100644 --- a/documentation/src/main/asciidoc/quickstart/guides/tutorial_annotations.adoc +++ b/documentation/src/main/asciidoc/quickstart/guides/tutorial_annotations.adoc @@ -1,104 +1,215 @@ -[[tutorial_annotations]] -== Tutorial Using Native Hibernate APIs and Annotation Mappings +:bootstrap-native-url: {doc-user-guide-url}#bootstrap-native +:entity-model-url: {doc-user-guide-url}#entity -NOTE: This tutorial is located within the download bundle under `annotations/`. 
+
+[[tutorial_annotations]]
+== Tutorial using native Hibernate APIs
 
 .Objectives
-- [*] Bootstrap a Hibernate `SessionFactory`
+- [*] Configure Hibernate using `hibernate.properties`
+- [*] Create a `SessionFactory` using link:{bootstrap-native-url}[native bootstrapping]
 - [*] Use annotations to provide mapping information
-- [*] Use the Hibernate native APIs
+- [*] Use `Session` to persist and query data
+
+****
+This tutorial is located within the download bundle under `annotations/`.
+****
 
 [[hibernate-gsg-tutorial-annotations-config]]
-=== The Hibernate configuration file
+=== Configuration via properties file
+
+In this example, configuration properties are specified in a file named `hibernate.properties`.
+
+.Configuration via `hibernate.properties`
+[source,properties]
+----
+# Database connection settings
+hibernate.connection.url=jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1
+hibernate.connection.username=sa
+hibernate.connection.password=
+
+# Echo all executed SQL to console
+hibernate.show_sql=true
+hibernate.format_sql=true
+hibernate.highlight_sql=true
+
+# Automatically export the schema
+hibernate.hbm2ddl.auto=create
+----
+
+The following properties specify JDBC connection information:
+
+.JDBC connection settings
+[%breakable,cols="35,~"]
+|===
+| Configuration property name | Purpose
+
+| `hibernate.connection.url` | JDBC URL of your database
+| `hibernate.connection.username` and `hibernate.connection.password` | Your database credentials
+|===
 
-The contents are identical to <> with one important difference...
-The `` element at the very end naming the annotated entity class using the `class` attribute.
+[NOTE]
+These tutorials use the H2 embedded database, so the values of these properties are specific to running H2 in its in-memory mode.
+These properties enable logging of SQL to the console as it is executed, in an aesthetically pleasing format: + +.Settings for SQL logging to the console +[%breakable,cols="35,~"] +|=== +| Configuration property name | Purpose + +| `hibernate.show_sql` | If `true`, log SQL directly to the console +| `hibernate.format_sql` | If `true`, log SQL in a multiline, indented format +| `hibernate.highlight_sql` | If `true`, log SQL with syntax highlighting via ANSI escape codes +|=== + +When developing persistence logic with Hibernate, it's very important to be able to see exactly what SQL is being executed. [[hibernate-gsg-tutorial-annotations-entity]] === The annotated entity Java class -The entity class in this tutorial is `org.hibernate.tutorial.annotations.Event` which follows JavaBean conventions. -In fact the class itself is identical to the one in <>, except that annotations -are used to provide the metadata, rather than a separate mapping file. +The entity class in this tutorial is `org.hibernate.tutorial.annotations.Event`. +Observe that: + +- This class uses standard JavaBean naming conventions for property getter and setter methods, as well as private visibility for the fields. + This is recommended, but it's not a requirement. +- The no-argument constructor, which is also a JavaBean convention, _is_ a requirement for all persistent classes. + Hibernate needs to instantiate objects for you, using Java Reflection. + The constructor should have package-private or `public` visibility, to allow Hibernate to generate proxies and optimized code for field access. + +[NOTE] +The link:{entity-model-url}[Entity types] section of the User Guide covers the complete set of requirements for the entity class. + +We use annotations to identify the class as an entity, and to map it to the relational schema. 
[[hibernate-gsg-tutorial-annotations-entity-entity]] .Identifying the class as an entity -==== -[source, JAVA] +[source, java] ---- -@Entity -@Table( name = "EVENTS" ) +@Entity <1> +@Table(name = "Events") <2> public class Event { ... } ---- -==== - -The `@jakarta.persistence.Entity` annotation is used to mark a class as an entity. It functions the same as the -`` mapping element discussed in <>. Additionally the -`@jakarta.persistence.Table` annotation explicitly specifies the table name. Without this specification, the default -table name would be _EVENT_. +<1> `@jakarta.persistence.Entity` marks the `Event` class as an entity. +<2> `@jakarta.persistence.Table` explicitly specifies the name of the mapped table. + Without this annotation, the table name would default to `Event`. +Every entity class must have an identifier. [[hibernate-gsg-tutorial-annotations-entity-id]] .Identifying the identifier property -==== -[source, JAVA] +[source, java] ---- -@Id -@GeneratedValue(generator="increment") -@GenericGenerator(name="increment", strategy = "increment") -public Long getId() { - return id; -} +@Id <1> +@GeneratedValue <2> +private Long id; ---- -==== - -`@jakarta.persistence.Id` marks the property which defines the entity's identifier. -`@jakarta.persistence.GeneratedValue` and `@org.hibernate.annotations.GenericGenerator` work in tandem -to indicate that Hibernate should use Hibernate's `increment` generation strategy for this entity's identifier values. +<1> `@jakarta.persistence.Id` marks the field as holding the identifier (primary key) of the entity. +<2> `@jakarta.persistence.GeneratedValue` specifies that this is a _synthetic id_, that is, a system-generated identifier (a surrogate primary key). +Other fields of the entity are considered persistent by default. 
 [[hibernate-gsg-tutorial-annotations-entity-properties]]
-.Identifying basic properties
-====
-[source, JAVA]
+.Mapping basic properties
+[source, java]
 ----
-public String getTitle() {
-    return title;
-}
+private String title;
 
-@Temporal(TemporalType.TIMESTAMP)
-@Column(name = "EVENT_DATE")
-public Date getDate() {
-    return date;
-}
+@Column(name = "eventDate") <1>
+private LocalDateTime date;
 ----
-====
-
-As in <>, the `date` property needs special handling to account for its special
-naming and its SQL type.
-Attributes of an entity are considered persistent by default when mapping with annotations, which is why we don't see
-any mapping information associated with `title`.
+<1> `@jakarta.persistence.Column` explicitly specifies the name of a mapped column.
+    Without this annotation, the column name would default to `date`, which is a keyword on some databases.
 
 [[hibernate-gsg-tutorial-annotations-test]]
 === Example code
 
-`org.hibernate.tutorial.annotations.AnnotationsIllustrationTest` is essentially the same as
-`org.hibernate.tutorial.hbm.NativeApiIllustrationTest` discussed in <>.
+The class `org.hibernate.tutorial.annotations.HibernateIllustrationTest` illustrates the use of Hibernate's native APIs, including:
+
+- `Session` and `SessionFactory`, and
+- `org.hibernate.boot` for configuration and bootstrap.
+
+There are several different ways to configure and start Hibernate, and this is not even the most common approach.
+
+[NOTE]
+The examples in these tutorials are presented as JUnit tests.
+A benefit of this approach is that `setUp()` and `tearDown()` roughly illustrate how an `org.hibernate.SessionFactory` is
+created when the program starts, and closed when the program terminates.
+
+[[hibernate-gsg-tutorial-basic-test-setUp]]
+.Obtaining the `SessionFactory`
+[source, java]
+----
+protected void setUp() {
+    // A SessionFactory is set up once for an application!
+ final StandardServiceRegistry registry = + new StandardServiceRegistryBuilder() + .build(); <1> <2> + try { + sessionFactory = + new MetadataSources(registry) <3> + .addAnnotatedClass(Event.class) <4> + .buildMetadata() <5> + .buildSessionFactory(); <6> + } + catch (Exception e) { + // The registry would be destroyed by the SessionFactory, but we + // had trouble building the SessionFactory so destroy it manually. + StandardServiceRegistryBuilder.destroy(registry); + } +} +---- +<1> The `setUp()` method first builds a `StandardServiceRegistry` instance which incorporates configuration information into a working set of `Services` for use by the `SessionFactory`. +<2> Here we put all configuration information in `hibernate.properties`, so there's not much interesting to see. +<3> Using the `StandardServiceRegistry` we create the `MetadataSources` which lets us tell Hibernate about our domain model. +<4> Here we have only one entity class to register. +<5> An instance of `Metadata` represents a complete, partially-validated view of the application domain model. +<6> The final step in the bootstrap process is to build a `SessionFactory` for the configured services and validated domain model. +The `SessionFactory` is a thread-safe object that's instantiated once to serve the entire application. + +The `SessionFactory` produces instances of `Session`. +Each session should be thought of as representing a _unit of work_. + +[[hibernate-gsg-tutorial-basic-test-saving]] +.Persisting entities +[source, java] +---- +sessionFactory.inTransaction(session -> { <1> + session.persist(new Event("Our very first event!", now())); <2> + session.persist(new Event("A follow up event", now())); +}); +---- +<1> The `inTransaction()` method creates a session and starts a new transaction. +<2> Here we create two new `Event` objects and hands them over to Hibernate, calling the `persist()` method to make these instances persistent. 
+Hibernate is responsible for executing an `INSERT` statement for each `Event`. + + +[[hibernate-gsg-tutorial-basic-test-list]] +.Obtaining a list of entities +[source, java] +---- +sessionFactory.inTransaction(session -> { + session.createSelectionQuery("from Event", Event.class) <1> + .getResultList() <2> + .forEach(event -> out.println("Event (" + event.getDate() + ") : " + event.getTitle())); +}); +---- + +<1> Here we use a very simple _Hibernate Query Language_ (HQL) statement to load all existing `Event` objects from the database. +<2> Hibernate generates and executes the appropriate `SELECT` statement, and then instantiates and populates `Event` objects with the data in the query result set. [[hibernate-gsg-tutorial-annotations-further]] === Take it further! .Practice Exercises -- [ ] Add an association to the `Event` entity to model a message thread. Use the -https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html[_User Guide_] for more details. -- [ ] Add a callback to receive notifications when an `Event` is created, updated or deleted. -Try the same with an event listener. Use the -https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html[_User Guide_] for more details. +- [ ] Actually run this example to see the SQL executed by Hibernate displayed in the console. +- [ ] Reconfigure the examples to connect to your own persistent relational database. +- [ ] Add an association to the `Event` entity to model a message thread. 
+ +// force the break diff --git a/documentation/src/main/asciidoc/quickstart/guides/tutorial_envers.adoc b/documentation/src/main/asciidoc/quickstart/guides/tutorial_envers.adoc index 96866c66bf3a..b0506b5fdb0c 100644 --- a/documentation/src/main/asciidoc/quickstart/guides/tutorial_envers.adoc +++ b/documentation/src/main/asciidoc/quickstart/guides/tutorial_envers.adoc @@ -1,27 +1,25 @@ [[tutorial_envers]] == Tutorial Using Envers -NOTE: This tutorial is located within the download bundle under `envers/`. - .Objectives - [*] Annotate an entity as historical - [*] Configure Envers - [*] Use the Envers APIs to view and analyze historical data +**** +This tutorial is located within the download bundle under `envers/`. +**** [[hibernate-gsg-tutorial-envers-config]] === persistence.xml -This file was discussed in the Jakarta Persistence tutorial in <>, and is essentially the same here. - +This file is unchanged from <>. [[hibernate-gsg-tutorial-envers-entity]] === The annotated entity Java class -Again, the entity is largely the same as in <>. The major difference is the -addition of the `@org.hibernate.envers.Audited` annotation, which tells Envers to automatically track changes to this -entity. - +The entity class is also almost identical to what we had <>. +The major difference is the addition of the annotation `@org.hibernate.envers.Audited`, which tells Envers to automatically track changes to this entity. [[hibernate-gsg-tutorial-envers-test]] === Example code @@ -32,28 +30,23 @@ initial revision as well as the updated revision. A revision refers to a histor [[hibernate-gsg-tutorial-envers-test-api]] .Using the `org.hibernate.envers.AuditReader` -==== -[source, JAVA] +[source, java] ---- public void testBasicUsage() { ... 
- AuditReader reader = AuditReaderFactory.get( entityManager ); - Event firstRevision = reader.find( Event.class, 2L, 1 ); + AuditReader reader = AuditReaderFactory.get( entityManager ); <1> + Event firstRevision = reader.find( Event.class, 2L, 1 ); <2> ... - Event secondRevision = reader.find( Event.class, 2L, 2 ); + Event secondRevision = reader.find( Event.class, 2L, 2 ); <3> ... } ---- -==== +<1> An `org.hibernate.envers.AuditReader` is obtained from the `org.hibernate.envers.AuditReaderFactory` which wraps the JPA `EntityManager`. +<2> The `find` method retrieves specific revisions of the entity. The first call retrieves revision number 1 of the `Event` with id 2. +<3> Later, the second call asks for revision number 2 of the `Event` with id 2. -We see that an `org.hibernate.envers.AuditReader` is obtained from the `org.hibernate.envers.AuditReaderFactory` -which wraps the `jakarta.persistence.EntityManager`. -Next, the `find` method retrieves specific revisions of the entity. The first call says to find revision number -1 of Event with id 2. The second call says to find revision number 2 of Event with id 2. - - -[[hibernate-gsg-tutorial-annotations-further]] +[[hibernate-gsg-tutorial-envers-further]] === Take it further! .Practice Exercises @@ -61,4 +54,6 @@ Next, the `find` method retrieves specific revisions of the entity. The first c - [*] Write a query to retrieve only historical data which meets some criteria. Use the _User Guide_ to see how Envers queries are constructed. - [*] Experiment with auditing entities which have various forms of relationships (many-to-one, many-to-many, etc). Try -retrieving historical versions (revisions) of such entities and navigating the object tree. \ No newline at end of file +retrieving historical versions (revisions) of such entities and navigating the object tree. 
+ +// force the break \ No newline at end of file diff --git a/documentation/src/main/asciidoc/quickstart/guides/tutorial_jpa.adoc b/documentation/src/main/asciidoc/quickstart/guides/tutorial_jpa.adoc index 1668ce83b766..b4538e7e77a9 100644 --- a/documentation/src/main/asciidoc/quickstart/guides/tutorial_jpa.adoc +++ b/documentation/src/main/asciidoc/quickstart/guides/tutorial_jpa.adoc @@ -1,118 +1,127 @@ [[tutorial_jpa]] -== Tutorial Using the Java Persistence API (Jakarta Persistence) - -NOTE: This tutorial is located within the download bundle under `entitymanager/`. +== Tutorial using JPA-standard APIs .Objectives +- [*] Configure Hibernate using `persistence.xml` - [*] Bootstrap a Jakarta Persistence `EntityManagerFactory` - [*] Use annotations to provide mapping information -- [*] Use Jakarta Persistence API calls +- [*] Use `EntityManager` to persist and query data + +**** +This tutorial is located within the download bundle under `entitymanager/`. +**** [[hibernate-gsg-tutorial-jpa-config]] === persistence.xml -The previous tutorials used the Hibernate-specific `hibernate.cfg.xml` configuration file. Jakarta Persistence, however, defines -a different bootstrap process that uses its own configuration file named `persistence.xml`. This bootstrapping process -is defined by the Jakarta Persistence specification. In Java(TM) SE environments the persistence provider (Hibernate in this case) -is required to locate all Jakarta Persistence configuration files by classpath lookup of the `META-INF/persistence.xml` resource name. - +JPA defines a different bootstrap process, along with a standard configuration file format named `persistence.xml`. +In Java(TM) SE environments the persistence provider (Hibernate) is required to locate every JPA configuration file in the classpath at the path `META-INF/persistence.xml`. [[hibernate-gsg-tutorial-jpa-config-pu]] -.persistence.xml -==== -[source, XML] +.Configuration via `persistence.xml` +[source, xml] ---- - - ... 
- - ----- -==== + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://java.sun.com/xml/ns/persistence http://java.sun.com/xml/ns/persistence/persistence_2_0.xsd" + version="2.0"> -`persistence.xml` files should provide a unique name for each "persistence unit". Applications use this name to -reference the configuration when obtaining an `jakarta.persistence.EntityManagerFactory` reference. + <1> + + Persistence unit for the Jakarta Persistence tutorial of the Hibernate Getting Started Guide + -The settings defined in the `` element are discussed in <>. -Here the `jakarta.persistence`-prefixed varieties are used when possible. Notice that the remaining -Hibernate-specific configuration setting names are now prefixed with `hibernate.`. + org.hibernate.tutorial.em.Event <2> -NOTE:: Configuration properties prefixed with the legacy Java EE "namespace" (`javax.persistence.*`) are also still -supported, although the Jakarta EE version (`jakarta.persistence.*`) should be preferred + <3> + + + + -Additionally, the `` element functions the same as we saw in <>. + + + + + + + + + + + + +---- +<1> A `persistence.xml` file should provide a unique name for each _persistence unit_ it declares. +Applications use this name to reference the configuration when obtaining an `EntityManagerFactory` as we will see shortly. +<2> The `` element registers our annotated entity class. +<3> The settings specified as `` elements were already discussed in <>. +Here JPA-standard property names are used where possible. + +[NOTE] +==== +Configuration properties prefixed with the legacy Java EE namespace `javax.persistence` are still +recognized, but the Jakarta EE namespace `jakarta.persistence` should be preferred. +==== [[hibernate-gsg-tutorial-jpa-entity]] === The annotated entity Java class -The entity is exactly the same as in <>. - +The entity class is exactly the same as in <>. 
[[hibernate-gsg-tutorial-jpa-test]] === Example code -The previous tutorials used the Hibernate native APIs. This tutorial uses the Jakarta Persistence APIs. +The previous tutorials used Hibernate native APIs. +This tutorial uses the standard Jakarta Persistence APIs. [[hibernate-gsg-tutorial-jpa-test-setUp]] -.Obtaining the jakarta.persistence.EntityManagerFactory -==== -[source, JAVA] +.Obtaining the JPA EntityManagerFactory +[source, java] ---- -protected void setUp() throws Exception { - sessionFactory = Persistence.createEntityManagerFactory( "org.hibernate.tutorial.jpa" ); +protected void setUp() { + entityManagerFactory = Persistence.createEntityManagerFactory("org.hibernate.tutorial.jpa"); <1> } ---- -==== +<1> Notice again that the persistence unit name is `org.hibernate.tutorial.jpa`, which matches the name from our <>. -Notice again that the persistence unit name is `org.hibernate.tutorial.jpa`, which matches <>. +The code to persist and query entities is almost identical to <>. +Unfortunately, `EntityManagerFactory` doesn't have a nice `inTransaction()` method like `SessionFactory` does, so we had to write our own: -[[hibernate-gsg-tutorial-jpa-test-saving]] -.Saving (persisting) entities -==== -[source, JAVA] ----- -EntityManager entityManager = sessionFactory.createEntityManager(); -entityManager.getTransaction().begin(); -entityManager.persist( new Event( "Our very first event!", new Date() ) ); -entityManager.persist( new Event( "A follow up event", new Date() ) ); -entityManager.getTransaction().commit(); -entityManager.close(); +.Managing transactions in JPA +[source, java] ---- -==== - -The code is similar to <>. The `jakarta.persistence.EntityManager` interface -is used instead of the `org.hibernate.Session` interface. Jakarta Persistence calls this operation "persist" instead of "save". 
- - -[[hibernate-gsg-tutorial-jpa-test-list]] -.Obtaining a list of entities -==== -[source, JAVA] ----- -entityManager = sessionFactory.createEntityManager(); -entityManager.getTransaction().begin(); -List result = entityManager.createQuery( "from Event", Event.class ).getResultList(); -for ( Event event : result ) { - System.out.println( "Event (" + event.getDate() + ") : " + event.getTitle() ); +void inTransaction(Consumer work) { + EntityManager entityManager = entityManagerFactory.createEntityManager(); + EntityTransaction transaction = entityManager.getTransaction(); + try { + transaction.begin(); + work.accept(entityManager); + transaction.commit(); + } + catch (Exception e) { + if (transaction.isActive()) { + transaction.rollback(); + } + throw e; + } + finally { + entityManager.close(); + } } -entityManager.getTransaction().commit(); -entityManager.close(); ---- -==== - -Again, the code is pretty similar to what we saw in <>. +[TIP] +If you use JPA in Java SE, you'll need to copy/paste this function into your project. Alternatively you could unwrap the `EntityManagerFactory` as a `SessionFactory`. -[[hibernate-gsg-tutorial-annotations-further]] +[[hibernate-gsg-tutorial-jpa-further]] === Take it further! .Practice Exercises -- [ ] Develop an EJB Session bean to investigate implications of using a container-managed -persistence context. Try both stateless and stateful use-cases. -- [ ] Use listeners with CDI-based injection to develop a JMS-based event message hub \ No newline at end of file +- [ ] Learn how to use CDI to inject a container-managed `EntityManager` in Quarkus. + See https://quarkus.io/guides/hibernate-orm[the Quarkus website] for instructions. 
+ +// force the break \ No newline at end of file diff --git a/documentation/src/main/asciidoc/quickstart/guides/tutorial_native.adoc b/documentation/src/main/asciidoc/quickstart/guides/tutorial_native.adoc deleted file mode 100644 index fae3bf0b0195..000000000000 --- a/documentation/src/main/asciidoc/quickstart/guides/tutorial_native.adoc +++ /dev/null @@ -1,231 +0,0 @@ -[[tutorial-native]] -== Tutorial Using Native Hibernate APIs and hbm.xml Mapping - -NOTE: This tutorial is located within the download bundle under `basic/`. - -.Objectives -- [*] Bootstrap a Hibernate `SessionFactory` -- [*] Use Hibernate mapping (`hbm.xml`) files to provide mapping information -- [*] Use the Hibernate native APIs - - -[[hibernate-gsg-tutorial-basic-config]] -=== The Hibernate configuration file - -For this tutorial, the `hibernate.cfg.xml` file defines the Hibernate configuration information. - -The `connection.driver_class`, `connection.url`, `connection.username` and `connection.password` `` elements -define JDBC connection information. These tutorials utilize the H2 in-memory database, so the values of these properties -are all specific to running H2 in its in-memory mode. `connection.pool_size` is used to configure the number of -connections in Hibernate's built-in connection pool. - -IMPORTANT: The built-in Hibernate connection pool is in no way intended for production use. It lacks several -features found on production-ready connection pools. - -The `dialect` property specifies the particular SQL variant with which Hibernate will converse. - -TIP: In most cases, Hibernate is able to properly determine which dialect to use. This is particularly useful -if your application targets multiple databases. - -The `hbm2ddl.auto` property enables automatic generation of database schemas directly into the database. - -Finally, add the mapping file(s) for persistent classes to the configuration. 
The `resource` attribute of the -`` element causes Hibernate to attempt to locate that mapping as a classpath resource using a -`java.lang.ClassLoader` lookup. - -There are many ways and options to bootstrap a Hibernate `SessionFactory`. For additional details, see -the _Native Bootstrapping_ topical guide. - - -[[hibernate-gsg-tutorial-basic-entity]] -=== The entity Java class - -The entity class for this tutorial is `org.hibernate.tutorial.hbm.Event` - -.Notes About the Entity -* This class uses standard JavaBean naming conventions for property getter and setter methods, as well as -private visibility for the fields. Although this is the recommended design, it is not required. -* The no-argument constructor, which is also a JavaBean convention, is a requirement for all persistent classes. -Hibernate needs to create objects for you, using Java Reflection. The constructor can be private. However, package -or public visibility is required for runtime proxy generation and efficient data retrieval without bytecode -instrumentation. - - -[[hibernate-gsg-tutorial-basic-mapping]] -=== The mapping file - -The mapping file for this tutorial is the classpath resource `org/hibernate/tutorial/hbm/Event.hbm.xml` (as discussed above). - -Hibernate uses the mapping metadata to determine how to load and store objects of the persistent class. The Hibernate -mapping file is one choice for providing Hibernate with this metadata. - - -[[hibernate-gsg-tutorial-basic-mapping-class]] -.The class mapping element -==== -[source, XML] ----- - - ... - ----- -==== - -.Functions of the class mapping element -* The `name` attribute (combined here with the `package` attribute from the containing `` element) -names the FQN of the class to be defined as an entity. -* The `table` attribute names the database table which contains the data for this entity. - -Instances of the `Event` class are now mapped to rows in the `EVENTS` database table. 
- - -[[hibernate-gsg-tutorial-basic-mapping-id]] -.The id mapping element -==== -[source, XML] ----- - - ... - ----- -==== - -Hibernate uses the property named by the `` element to uniquely identify rows in the table. - -IMPORTANT: It is not required for the id element to map to the table's actual primary key column(s), but it is -the normal convention. Tables mapped in Hibernate do not even need to define primary keys. However, it is strongly -recommend that all schemas define proper referential integrity. Therefore id and primary key are used interchangeably -throughout Hibernate documentation. - -The `` element here names the EVENT_ID column as the primary key of the EVENTS table. It also identifies the -`id` property of the `Event` class as the property containing the identifier value. - -The `generator` element informs Hibernate about which strategy is used to generated primary key values for this entity. -This example uses a simple incrementing count. - -[[hibernate-gsg-tutorial-basic-mapping-property]] -.The property mapping element -==== -[source, XML] ----- - - ----- -==== - -The two `` elements declare the remaining two persistent properties of the `Event` class: `date` and `title`. -The `date` property mapping includes the `column` attribute, but the `title` does not. -In the absence of a `column` attribute, Hibernate uses the property name as the column name. -This is appropriate for `title`, but since `date` is a reserved keyword in most databases, you need to specify a -non-reserved word for the column name. - -The `title` mapping also lacks a type attribute. The types declared and used in the mapping files are neither Java data -types nor SQL database types. Instead, they are *Hibernate mapping types*, which are converters which translate between -Java and SQL data types. 
Hibernate attempts to determine the correct conversion and mapping type autonomously if the -type attribute is not specified in the mapping, by using Java reflection to determine the Java type of the declared -property and using a default mapping type for that Java type. - -In some cases this automatic detection might not choose the default you expect or need, as seen with the -`date` property. Hibernate cannot know if the property, which is of type `java.util.Date`, should map to an SQL -_DATE_, _TIME_, or _TIMESTAMP_ datatype. Full date and time information is preserved by mapping the property to -the _timestamp_ converter, which identifies the converter as declared by `org.hibernate.type.StandardBasicTypes.TIMESTAMP`. - -TIP: Hibernate determines the mapping type using reflection when the mapping files are processed. This process adds -overhead in terms of time and resources. If startup performance is important, consider explicitly defining the type -to use. - -[[hibernate-gsg-tutorial-basic-test]] -=== Example code - -The `org.hibernate.tutorial.hbm.NativeApiIllustrationTest` class illustrates using the Hibernate native API. - -NOTE: The examples in these tutorials are presented as JUnit tests, for ease of use. One benefit of this -approach is that `setUp` and `tearDown` roughly illustrate how a `org.hibernate.SessionFactory` is created at the -start-up of an application and closed at the end of the application lifecycle. - - -[[hibernate-gsg-tutorial-basic-test-setUp]] -.Obtaining the `org.hibernate.SessionFactory` -==== -[source, JAVA] ----- -protected void setUp() throws Exception { - // A SessionFactory is set up once for an application! 
- final StandardServiceRegistry registry = new StandardServiceRegistryBuilder() - .configure() // configures settings from hibernate.cfg.xml - .build(); - try { - sessionFactory = new MetadataSources( registry ).buildMetadata().buildSessionFactory(); - } - catch (Exception e) { - // The registry would be destroyed by the SessionFactory, but we had trouble building the SessionFactory - // so destroy it manually. - StandardServiceRegistryBuilder.destroy( registry ); - } -} ----- -==== - -The `setUp` method first builds a `org.hibernate.boot.registry.StandardServiceRegistry` instance which incorporates -configuration information into a working set of Services for use by the SessionFactory. In this tutorial -we defined all configuration information in `hibernate.cfg.xml` so there is not much interesting to see here. - -Using the `StandardServiceRegistry` we create the `org.hibernate.boot.MetadataSources` which is the start point for -telling Hibernate about your domain model. Again, since we defined that in `hibernate.cfg.xml` so there is not much -interesting to see here. - -`org.hibernate.boot.Metadata` represents the complete, partially validated view of the application domain model which the -`SessionFactory` will be based on. - -The final step in the bootstrap process is to build the `SessionFactory`. The `SessionFactory` is a -thread-safe object that is instantiated once to serve the entire application. - -The `SessionFactory` acts as a factory for `org.hibernate.Session` instances, which should be thought of -as a corollary to a "unit of work". 
- - -[[hibernate-gsg-tutorial-basic-test-saving]] -.Saving entities -==== -[source, JAVA] ----- -Session session = sessionFactory.openSession(); -session.beginTransaction(); -session.save( new Event( "Our very first event!", new Date() ) ); -session.save( new Event( "A follow up event", new Date() ) ); -session.getTransaction().commit(); -session.close(); ----- -==== - -`testBasicUsage()` first creates some new `Event` objects and hands them over to Hibernate for management, using the -`save()` method. Hibernate now takes responsibility to perform an _INSERT_ on the database for each `Event`. - - -[[hibernate-gsg-tutorial-basic-test-list]] -.Obtaining a list of entities -==== -[source, JAVA] ----- -session = sessionFactory.openSession(); -session.beginTransaction(); -List result = session.createQuery( "from Event" ).list(); -for ( Event event : (List) result ) { - System.out.println( "Event (" + event.getDate() + ") : " + event.getTitle() ); -} -session.getTransaction().commit(); -session.close(); ----- -==== - -Here we see an example of the Hibernate Query Language (HQL) to load all existing `Event` objects from the database -by generating the appropriate _SELECT_ SQL, sending it to the database and populating `Event` objects with the result -set data. - - -[[hibernate-gsg-tutorial-annotations-further]] -=== Take it further! - -.Practice Exercises -- [ ] Reconfigure the examples to connect to your own persistent relational database. -- [ ] Add an association to the `Event` entity to model a message thread. 
diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/AnnotationsIllustrationTest.java b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/AnnotationsIllustrationTest.java deleted file mode 100644 index e9c1114673f1..000000000000 --- a/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/AnnotationsIllustrationTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * Copyright (c) 2010, Red Hat Inc. or third-party contributors as - * indicated by the @author tags or express copyright attribution - * statements applied by the authors. All third-party contributions are - * distributed under license by Red Hat Inc. - * - * This copyrighted material is made available to anyone wishing to use, modify, - * copy, or redistribute it subject to the terms and conditions of the GNU - * Lesser General Public License, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this distribution; if not, write to: - * Free Software Foundation, Inc. 
- * 51 Franklin Street, Fifth Floor - * Boston, MA 02110-1301 USA - */ -package org.hibernate.tutorial.annotations; - -import java.util.Date; -import java.util.List; - -import org.hibernate.Session; -import org.hibernate.SessionFactory; -import org.hibernate.boot.MetadataSources; -import org.hibernate.boot.registry.StandardServiceRegistry; -import org.hibernate.boot.registry.StandardServiceRegistryBuilder; - -import junit.framework.TestCase; - -/** - * Illustrates the use of Hibernate native APIs. The code here is unchanged from the {@code basic} example, the - * only difference being the use of annotations to supply the metadata instead of Hibernate mapping files. - * - * @author Steve Ebersole - */ -public class AnnotationsIllustrationTest extends TestCase { - private SessionFactory sessionFactory; - - @Override - protected void setUp() throws Exception { - // A SessionFactory is set up once for an application! - final StandardServiceRegistry registry = new StandardServiceRegistryBuilder() - .configure() // configures settings from hibernate.cfg.xml - .build(); - try { - sessionFactory = new MetadataSources( registry ).buildMetadata().buildSessionFactory(); - } - catch (Exception e) { - // The registry would be destroyed by the SessionFactory, but we had trouble building the SessionFactory - // so destroy it manually. - StandardServiceRegistryBuilder.destroy( registry ); - } - } - - @Override - protected void tearDown() throws Exception { - if ( sessionFactory != null ) { - sessionFactory.close(); - } - } - - @SuppressWarnings("unchecked") - public void testBasicUsage() { - // create a couple of events... 
- Session session = sessionFactory.openSession(); - session.beginTransaction(); - session.save( new Event( "Our very first event!", new Date() ) ); - session.save( new Event( "A follow up event", new Date() ) ); - session.getTransaction().commit(); - session.close(); - - // now lets pull events from the database and list them - session = sessionFactory.openSession(); - session.beginTransaction(); - List result = session.createQuery( "from Event" ).list(); - for ( Event event : (List) result ) { - System.out.println( "Event (" + event.getDate() + ") : " + event.getTitle() ); - } - session.getTransaction().commit(); - session.close(); - } -} diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/Event.java b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/Event.java index d5f3c3541019..d609935d4bb0 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/Event.java +++ b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/Event.java @@ -23,38 +23,36 @@ */ package org.hibernate.tutorial.annotations; -import java.util.Date; +import java.time.LocalDateTime; import jakarta.persistence.Column; import jakarta.persistence.Entity; import jakarta.persistence.GeneratedValue; import jakarta.persistence.Id; import jakarta.persistence.Table; -import jakarta.persistence.Temporal; -import jakarta.persistence.TemporalType; - -import org.hibernate.annotations.GenericGenerator; @Entity -@Table( name = "EVENTS" ) +@Table(name = "Events") public class Event { + + @Id + @GeneratedValue private Long id; private String title; - private Date date; + + @Column(name = "eventDate") + private LocalDateTime date; public Event() { // this form used by Hibernate } - public Event(String title, Date date) { + public Event(String 
title, LocalDateTime date) { // for application use, to create new events this.title = title; this.date = date; } - @Id - @GeneratedValue(generator="increment") - @GenericGenerator(name="increment", strategy = "increment") public Long getId() { return id; } @@ -63,13 +61,11 @@ private void setId(Long id) { this.id = id; } - @Temporal(TemporalType.TIMESTAMP) - @Column(name = "EVENT_DATE") - public Date getDate() { + public LocalDateTime getDate() { return date; } - public void setDate(Date date) { + public void setDate(LocalDateTime date) { this.date = date; } diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/NativeApiIllustrationTest.java b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/HibernateIllustrationTest.java similarity index 58% rename from documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/NativeApiIllustrationTest.java rename to documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/HibernateIllustrationTest.java index 8ee5d08458f1..958606a7d55e 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/NativeApiIllustrationTest.java +++ b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/java/org/hibernate/tutorial/annotations/HibernateIllustrationTest.java @@ -21,10 +21,9 @@ * 51 Franklin Street, Fifth Floor * Boston, MA 02110-1301 USA */ -package org.hibernate.tutorial.hbm; +package org.hibernate.tutorial.annotations; -import java.util.Date; -import java.util.List; +import java.time.LocalDateTime; import org.hibernate.Session; import org.hibernate.SessionFactory; @@ -34,55 +33,57 @@ import junit.framework.TestCase; +import static java.lang.System.out; +import static java.time.LocalDateTime.now; + /** - * Illustrates use of Hibernate native 
APIs. + * Illustrates the use of Hibernate native APIs, including the use + * of org.hibernate.boot for configuration and bootstrap. + * Configuration properties are sourced from hibernate.properties. * * @author Steve Ebersole */ -public class NativeApiIllustrationTest extends TestCase { +public class HibernateIllustrationTest extends TestCase { private SessionFactory sessionFactory; @Override - protected void setUp() throws Exception { + protected void setUp() { // A SessionFactory is set up once for an application! - final StandardServiceRegistry registry = new StandardServiceRegistryBuilder() - .configure() // configures settings from hibernate.cfg.xml - .build(); + final StandardServiceRegistry registry = + new StandardServiceRegistryBuilder() + .build(); try { - sessionFactory = new MetadataSources( registry ).buildMetadata().buildSessionFactory(); + sessionFactory = + new MetadataSources(registry) + .addAnnotatedClass(Event.class) + .buildMetadata() + .buildSessionFactory(); } catch (Exception e) { - // The registry would be destroyed by the SessionFactory, but we had trouble building the SessionFactory - // so destroy it manually. - StandardServiceRegistryBuilder.destroy( registry ); + // The registry would be destroyed by the SessionFactory, but we + // had trouble building the SessionFactory so destroy it manually. + StandardServiceRegistryBuilder.destroy(registry); } } @Override - protected void tearDown() throws Exception { + protected void tearDown() { if ( sessionFactory != null ) { sessionFactory.close(); } } - @SuppressWarnings("unchecked") public void testBasicUsage() { // create a couple of events... 
- Session session = sessionFactory.openSession(); - session.beginTransaction(); - session.save( new Event( "Our very first event!", new Date() ) ); - session.save( new Event( "A follow up event", new Date() ) ); - session.getTransaction().commit(); - session.close(); + sessionFactory.inTransaction(session -> { + session.persist(new Event("Our very first event!", now())); + session.persist(new Event("A follow up event", now())); + }); // now lets pull events from the database and list them - session = sessionFactory.openSession(); - session.beginTransaction(); - List result = session.createQuery( "from Event" ).list(); - for ( Event event : (List) result ) { - System.out.println( "Event (" + event.getDate() + ") : " + event.getTitle() ); - } - session.getTransaction().commit(); - session.close(); + sessionFactory.inTransaction(session -> { + session.createSelectionQuery("from Event", Event.class).getResultList() + .forEach(event -> out.println("Event (" + event.getDate() + ") : " + event.getTitle())); + }); } } diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/resources/hibernate.cfg.xml b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/resources/hibernate.cfg.xml deleted file mode 100644 index 8bcad6c099fd..000000000000 --- a/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/resources/hibernate.cfg.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - org.h2.Driver - jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1 - sa - - - - 1 - - - org.hibernate.dialect.H2Dialect - - - org.hibernate.cache.internal.NoCacheProvider - - - true - - - create - - - - - - - \ No newline at end of file diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/resources/hibernate.properties b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/resources/hibernate.properties new file mode 100644 index 000000000000..2cb72e3d78fa --- /dev/null +++ 
b/documentation/src/main/asciidoc/quickstart/tutorials/annotations/src/test/resources/hibernate.properties @@ -0,0 +1,12 @@ +# Database connection settings +hibernate.connection.url=jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1 +hibernate.connection.username=sa +hibernate.connection.password= + +# Echo all executed SQL to console +hibernate.show_sql=true +hibernate.format_sql=true +hibernate.highlight_sql=true + +# Automatically export the schema +hibernate.hbm2ddl.auto=create diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/basic/pom.xml b/documentation/src/main/asciidoc/quickstart/tutorials/basic/pom.xml deleted file mode 100644 index 2017a9fff9a2..000000000000 --- a/documentation/src/main/asciidoc/quickstart/tutorials/basic/pom.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - - 4.0.0 - - - org.hibernate.tutorials - hibernate-tutorials - $version - ../pom.xml - - - hibernate-tutorial-hbm - Hibernate hbm.xml Tutorial - Hibernate tutorial illustrating the use of native APIs and hbm.xml for mapping metadata - - - - true - - - diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/Event.hbm.xml b/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/Event.hbm.xml deleted file mode 100644 index f786e79eb4e5..000000000000 --- a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/Event.hbm.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/Event.java b/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/Event.java deleted file mode 100644 index ae4c2dfd84bd..000000000000 --- a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/java/org/hibernate/tutorial/hbm/Event.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Hibernate, Relational 
Persistence for Idiomatic Java - * - * Copyright (c) 2010, Red Hat Inc. or third-party contributors as - * indicated by the @author tags or express copyright attribution - * statements applied by the authors. All third-party contributions are - * distributed under license by Red Hat Inc. - * - * This copyrighted material is made available to anyone wishing to use, modify, - * copy, or redistribute it subject to the terms and conditions of the GNU - * Lesser General Public License, as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this distribution; if not, write to: - * Free Software Foundation, Inc. - * 51 Franklin Street, Fifth Floor - * Boston, MA 02110-1301 USA - */ -package org.hibernate.tutorial.hbm; - -import java.util.Date; - -public class Event { - private Long id; - - private String title; - private Date date; - - public Event() { - // this form used by Hibernate - } - - public Event(String title, Date date) { - // for application use, to create new events - this.title = title; - this.date = date; - } - - public Long getId() { - return id; - } - - private void setId(Long id) { - this.id = id; - } - - public Date getDate() { - return date; - } - - public void setDate(Date date) { - this.date = date; - } - - public String getTitle() { - return title; - } - - public void setTitle(String title) { - this.title = title; - } -} \ No newline at end of file diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/resources/hibernate.cfg.xml b/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/resources/hibernate.cfg.xml deleted file mode 100644 index 9f7caa7269e6..000000000000 
--- a/documentation/src/main/asciidoc/quickstart/tutorials/basic/src/test/resources/hibernate.cfg.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - - - org.h2.Driver - jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1 - sa - - - - 1 - - - org.hibernate.dialect.H2Dialect - - - org.hibernate.cache.internal.NoCacheProvider - - - true - - - create - - - - - - \ No newline at end of file diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/Event.java b/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/Event.java index 253d44bd98c1..dc5a9257fc56 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/Event.java +++ b/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/Event.java @@ -23,61 +23,57 @@ */ package org.hibernate.tutorial.em; -import java.util.Date; +import java.time.LocalDateTime; import jakarta.persistence.Column; import jakarta.persistence.Entity; import jakarta.persistence.GeneratedValue; import jakarta.persistence.Id; import jakarta.persistence.Table; -import jakarta.persistence.Temporal; -import jakarta.persistence.TemporalType; - -import org.hibernate.annotations.GenericGenerator; @Entity -@Table( name = "EVENTS" ) +@Table(name = "Events") public class Event { - private Long id; - private String title; - private Date date; + @Id + @GeneratedValue + private Long id; + + private String title; + + @Column(name = "eventDate") + private LocalDateTime date; public Event() { // this form used by Hibernate } - public Event(String title, Date date) { + public Event(String title, LocalDateTime date) { // for application use, to create new events this.title = title; this.date = date; } - @Id - @GeneratedValue(generator="increment") - @GenericGenerator(name="increment", strategy = "increment") - public Long getId() { + public Long getId() { 
return id; - } + } - private void setId(Long id) { + private void setId(Long id) { this.id = id; - } + } - @Temporal(TemporalType.TIMESTAMP) - @Column(name = "EVENT_DATE") - public Date getDate() { + public LocalDateTime getDate() { return date; - } + } - public void setDate(Date date) { + public void setDate(LocalDateTime date) { this.date = date; - } + } - public String getTitle() { + public String getTitle() { return title; - } + } - public void setTitle(String title) { + public void setTitle(String title) { this.title = title; - } + } } diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/EntityManagerIllustrationTest.java b/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/JPAIllustrationTest.java similarity index 53% rename from documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/EntityManagerIllustrationTest.java rename to documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/JPAIllustrationTest.java index 2aecd8134ca1..0f7adc680320 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/EntityManagerIllustrationTest.java +++ b/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/java/org/hibernate/tutorial/em/JPAIllustrationTest.java @@ -23,51 +23,73 @@ */ package org.hibernate.tutorial.em; -import java.util.Date; -import java.util.List; +import java.util.function.Consumer; + import jakarta.persistence.EntityManager; import jakarta.persistence.EntityManagerFactory; -import jakarta.persistence.Persistence; +import jakarta.persistence.EntityTransaction; import junit.framework.TestCase; +import static java.lang.System.out; +import static java.time.LocalDateTime.now; + +import static jakarta.persistence.Persistence.createEntityManagerFactory; + + 
/** * Illustrates basic use of Hibernate as a Jakarta Persistence provider. + * Configuration properties are sourced from persistence.xml. * * @author Steve Ebersole */ -public class EntityManagerIllustrationTest extends TestCase { +public class JPAIllustrationTest extends TestCase { private EntityManagerFactory entityManagerFactory; @Override - protected void setUp() throws Exception { - // like discussed with regards to SessionFactory, an EntityManagerFactory is set up once for an application - // IMPORTANT: notice how the name here matches the name we gave the persistence-unit in persistence.xml! - entityManagerFactory = Persistence.createEntityManagerFactory( "org.hibernate.tutorial.jpa" ); + protected void setUp() { + // an EntityManagerFactory is set up once for an application + // IMPORTANT: notice how the name here matches the name we + // gave the persistence-unit in persistence.xml + entityManagerFactory = createEntityManagerFactory("org.hibernate.tutorial.jpa"); } @Override - protected void tearDown() throws Exception { + protected void tearDown() { entityManagerFactory.close(); } public void testBasicUsage() { // create a couple of events... 
- EntityManager entityManager = entityManagerFactory.createEntityManager(); - entityManager.getTransaction().begin(); - entityManager.persist( new Event( "Our very first event!", new Date() ) ); - entityManager.persist( new Event( "A follow up event", new Date() ) ); - entityManager.getTransaction().commit(); - entityManager.close(); + inTransaction(entityManager -> { + entityManager.persist(new Event("Our very first event!", now())); + entityManager.persist(new Event("A follow up event", now())); + }); // now lets pull events from the database and list them - entityManager = entityManagerFactory.createEntityManager(); - entityManager.getTransaction().begin(); - List result = entityManager.createQuery( "from Event", Event.class ).getResultList(); - for ( Event event : result ) { - System.out.println( "Event (" + event.getDate() + ") : " + event.getTitle() ); + inTransaction(entityManager -> { + entityManager.createQuery("select e from Event e", Event.class).getResultList() + .forEach(event -> out.println("Event (" + event.getDate() + ") : " + event.getTitle())); + }); + } + + void inTransaction(Consumer work) { + EntityManager entityManager = entityManagerFactory.createEntityManager(); + EntityTransaction transaction = entityManager.getTransaction(); + try { + transaction.begin(); + work.accept(entityManager); + transaction.commit(); + } + catch (Exception e) { + if (transaction.isActive()) { + transaction.rollback(); + } + throw e; + } + finally { + entityManager.close(); } - entityManager.getTransaction().commit(); - entityManager.close(); } + } diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/resources/META-INF/persistence.xml b/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/resources/META-INF/persistence.xml index 69aeb58d523d..db0787396dd9 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/resources/META-INF/persistence.xml +++ 
b/documentation/src/main/asciidoc/quickstart/tutorials/entitymanager/src/test/resources/META-INF/persistence.xml @@ -17,13 +17,18 @@ org.hibernate.tutorial.em.Event - + + + + + - + + diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/EnversIllustrationTest.java b/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/EnversIllustrationTest.java index d821a1182ecc..808ca2db7b0e 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/EnversIllustrationTest.java +++ b/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/EnversIllustrationTest.java @@ -27,18 +27,22 @@ import java.util.List; import jakarta.persistence.EntityManager; import jakarta.persistence.EntityManagerFactory; -import jakarta.persistence.Persistence; import junit.framework.TestCase; import org.hibernate.envers.AuditReader; import org.hibernate.envers.AuditReaderFactory; +import static java.time.LocalDateTime.now; + +import static jakarta.persistence.Persistence.createEntityManagerFactory; + /** - * Illustrates the set up and use of Envers. + * Illustrates the setup and use of Envers. *

- * This example is different from the others in that we really need to save multiple revisions to the entity in - * order to get a good look at Envers in action. + * This example is different from the others because we need to have + * multiple revisions to the entity in order to get a good look at + * Envers in action. * * @author Steve Ebersole */ @@ -46,14 +50,12 @@ public class EnversIllustrationTest extends TestCase { private EntityManagerFactory entityManagerFactory; @Override - protected void setUp() throws Exception { - // like discussed with regards to SessionFactory, an EntityManagerFactory is set up once for an application - // IMPORTANT: notice how the name here matches the name we gave the persistence-unit in persistence.xml! - entityManagerFactory = Persistence.createEntityManagerFactory( "org.hibernate.tutorial.envers" ); + protected void setUp() { + entityManagerFactory = createEntityManagerFactory( "org.hibernate.tutorial.envers" ); } @Override - protected void tearDown() throws Exception { + protected void tearDown() { entityManagerFactory.close(); } @@ -61,8 +63,8 @@ public void testBasicUsage() { // create a couple of events EntityManager entityManager = entityManagerFactory.createEntityManager(); entityManager.getTransaction().begin(); - entityManager.persist( new Event( "Our very first event!", new Date() ) ); - entityManager.persist( new Event( "A follow up event", new Date() ) ); + entityManager.persist( new Event( "Our very first event!", now() ) ); + entityManager.persist( new Event( "A follow up event", now() ) ); entityManager.getTransaction().commit(); entityManager.close(); @@ -82,7 +84,7 @@ public void testBasicUsage() { entityManager = entityManagerFactory.createEntityManager(); entityManager.getTransaction().begin(); Event myEvent = entityManager.find( Event.class, 2L ); // we are using the increment generator, so we know 2 is a valid id - myEvent.setDate( new Date() ); + myEvent.setDate( now() ); myEvent.setTitle( 
myEvent.getTitle() + " (rescheduled)" ); entityManager.getTransaction().commit(); entityManager.close(); diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/Event.java b/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/Event.java index 36dcc05866d1..9cb5dd661b69 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/Event.java +++ b/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/java/org/hibernate/tutorial/envers/Event.java @@ -23,70 +23,60 @@ */ package org.hibernate.tutorial.envers; -import java.util.Date; +import java.time.LocalDateTime; import jakarta.persistence.Column; import jakarta.persistence.Entity; import jakarta.persistence.GeneratedValue; import jakarta.persistence.Id; import jakarta.persistence.Table; -import jakarta.persistence.Temporal; -import jakarta.persistence.TemporalType; -import org.hibernate.annotations.GenericGenerator; import org.hibernate.envers.Audited; @Entity -@Table( name = "EVENTS" ) -@Audited // <--- this tell Envers to audit (track changes to) this entity +@Table(name = "Events") +@Audited // <--- this tells Envers to audit (track changes to) this entity public class Event { - private Long id; - private String title; - private Date date; + @Id + @GeneratedValue + private Long id; + + private String title; + + @Column(name = "eventDate") + private LocalDateTime date; public Event() { // this form used by Hibernate } - public Event(String title, Date date) { + public Event(String title, LocalDateTime date) { // for application use, to create new events this.title = title; this.date = date; } - @Id - @GeneratedValue(generator="increment") - @GenericGenerator(name="increment", strategy = "increment") - public Long getId() { + public Long getId() { return id; - } + } - private void setId(Long id) { + private void setId(Long id) { 
this.id = id; - } + } - @Temporal(TemporalType.TIMESTAMP) - @Column(name = "EVENT_DATE") - public Date getDate() { + public LocalDateTime getDate() { return date; - } + } - public void setDate(Date date) { + public void setDate(LocalDateTime date) { this.date = date; - } + } - public String getTitle() { + public String getTitle() { return title; - } + } - public void setTitle(String title) { + public void setTitle(String title) { this.title = title; - } - - @Override - public int hashCode() { - int result = title.hashCode(); - result = 31 * result + date.hashCode(); - return result; } } diff --git a/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/resources/META-INF/persistence.xml b/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/resources/META-INF/persistence.xml index ccacbcbbb24b..e84f11cc2a8b 100644 --- a/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/resources/META-INF/persistence.xml +++ b/documentation/src/main/asciidoc/quickstart/tutorials/envers/src/test/resources/META-INF/persistence.xml @@ -17,13 +17,18 @@ org.hibernate.tutorial.envers.Event - + + + + + - + + diff --git a/documentation/src/main/asciidoc/shared/background.adoc b/documentation/src/main/asciidoc/shared/background.adoc new file mode 100644 index 000000000000..03430d936447 --- /dev/null +++ b/documentation/src/main/asciidoc/shared/background.adoc @@ -0,0 +1,12 @@ + +While a strong background in SQL is not required to use Hibernate, a basic understanding of its concepts is useful - especially the principles of _data modeling_. +Understanding the basics of transactions and design patterns such as _Unit of Work_ are important as well. + +[[useful-resources]] +.Useful background resources +**** +- https://en.wikipedia.org/wiki/Data_modeling[Data Modeling (Wikipedia)]. 
+- https://www.agiledata.org/essays/dataModeling101.html[Data Modeling 101] +- https://www.marcobehler.com/guides/a-guide-to-accessing-databases-in-java[Java & Databases: An Overview of Libraries & APIs] +- https://martinfowler.com/eaaCatalog/unitOfWork.html[Unit of Work] +**** diff --git a/documentation/src/main/asciidoc/shared/common-attributes.adoc b/documentation/src/main/asciidoc/shared/common-attributes.adoc new file mode 100644 index 000000000000..63cecd7a86cc --- /dev/null +++ b/documentation/src/main/asciidoc/shared/common-attributes.adoc @@ -0,0 +1,3 @@ + +// NOTE : majorMinorVersion is injected by the build and will override this value +:majorMinorVersion: 6.2 \ No newline at end of file diff --git a/documentation/src/main/asciidoc/shared/filesystem-attributes.adoc b/documentation/src/main/asciidoc/shared/filesystem-attributes.adoc new file mode 100644 index 000000000000..4b0c59edba7a --- /dev/null +++ b/documentation/src/main/asciidoc/shared/filesystem-attributes.adoc @@ -0,0 +1,15 @@ +// **************************************************************************** +// Centralized definition of Asciidoc attributes for local filesystem paths +// **************************************************************************** + +:doc-main-dir: ../.. +:doc-main-asciidoc-dir: {doc-main-dir}/asciidoc +:doc-main-style-dir: {doc-main-dir}/style +:pdf-theme: {doc-main-style-dir}/pdf/theme.yml +:pdf-fontsdir: {doc-main-style-dir}/pdf/fonts +//:title-logo-image: {doc-main-style-dir}/asciidoctor/images/org/hibernate/logo.png[] + +:root-project-dir: ../../../../.. 
+:core-project-dir: {root-project-dir}/hibernate-core +:documentation-project-dir: {root-project-dir}/documentation +:testing-project-dir: {root-project-dir}/hibernate-testing diff --git a/documentation/src/main/asciidoc/shared/renderer-attributes.adoc b/documentation/src/main/asciidoc/shared/renderer-attributes.adoc new file mode 100644 index 000000000000..0aa3837684d6 --- /dev/null +++ b/documentation/src/main/asciidoc/shared/renderer-attributes.adoc @@ -0,0 +1,3 @@ +:source-highlighter: rouge +:icons: font +:doctype: book \ No newline at end of file diff --git a/documentation/src/main/asciidoc/shared/url-attributes.adoc b/documentation/src/main/asciidoc/shared/url-attributes.adoc new file mode 100644 index 000000000000..12c29a944d4f --- /dev/null +++ b/documentation/src/main/asciidoc/shared/url-attributes.adoc @@ -0,0 +1,23 @@ +// **************************************************************************** +// Centralized definition of Asciidoc attributes for documentation urls +// **************************************************************************** + +include::./common-attributes.adoc[] + +:doc-base-url: https://docs.jboss.org/hibernate/orm +:doc-version-base-url: {doc-base-url}/{majorMinorVersion} +:doc-migration-guide-url: {doc-version-base-url}/migration-guide/migration-guide.html +:doc-quick-start-url: {doc-version-base-url}/quickstart/html_single/ +:doc-query-language-url: {doc-version-base-url}/querylanguage/html_single/Hibernate_Query_Language.html +:doc-introduction-url: {doc-version-base-url}/introduction/html_single/Hibernate_Introduction.html +:doc-user-guide-url: {doc-version-base-url}/userguide/html_single/Hibernate_User_Guide.html +:doc-javadoc-url: {doc-version-base-url}/javadocs/ +:doc-topical-url: {doc-version-base-url}/topical/html_single/ +:doc-registries-url: {doc-topical-url}/registries/ServiceRegistries.html +:doc-logging-url: {doc-topical-url}/logging/Logging.html + +:report-deprecation-url: 
{doc-version-base-url}/deprecated/deprecating.txt +:report-dialect-url: {doc-version-base-url}/dialect/dialect.html +:report-incubating-url: {doc-version-base-url}/incubating/incubating.txt +:report-internals-url: {doc-version-base-url}/internals/internal.txt +:report-logging-url: {doc-version-base-url}/logging/logging.html diff --git a/documentation/src/main/asciidoc/topical/index.adoc b/documentation/src/main/asciidoc/topical/index.adoc index d6798303c575..628562548d79 100644 --- a/documentation/src/main/asciidoc/topical/index.adoc +++ b/documentation/src/main/asciidoc/topical/index.adoc @@ -1,36 +1,52 @@ +:shared-attributes-dir: ../shared/ + +include::{shared-attributes-dir}/common-attributes.adoc[] +include::{shared-attributes-dir}/url-attributes.adoc[] +include::{shared-attributes-dir}/filesystem-attributes.adoc[] +include::{shared-attributes-dir}/renderer-attributes.adoc[] + = Topical Guides -:userguide_rel_link: ../../userguide/html_single/Hibernate_User_Guide.html -:integrationguid_rel_link: ../../integrationguide/html_single/Hibernate_Integration_Guide.html -:migrationguide_rel_link: ../../migration-guide/migration-guide.html -:logging_rel_link: ../../logging/logging.html -:incubating_rel_link: ../../incubating/incubating.txt -:internal_rel_link: ../../internals/internal.txt :toc: Hibernate documentation is organized into several guides that cover specific topics. This guide provides links to -all the guides to give some direction on where to look for information. +all the documentation to give some direction on where to look for information. -NOTE: This is still very much a work in progress. <> is definitely welcome! +[TIP] +==== +See the link:{doc-migration-guide-url}[Migration Guide] for details about migration to Hibernate {majorMinorVersion}. 
+==== -== User Guides -* For information on bootstrapping Hibernate -** For bootstrapping a SessionFactory, see the link:{userguide_rel_link}#bootstrap-native[Native Bootstrapping Guide] -** For bootstrapping an EntityManagerFactory (Jakarta Persistence) using Hibernate, see the link:{userguide_rel_link}#bootstrap-jpa[Jakarta Persistence Bootstrap Guide] -** For (semi-deprecated) bootstrapping of a SessionFactory using the legacy Configuration approach, see the link:{userguide_rel_link}#appendix-legacy-bootstrap[Legacy Bootstrap Guide] -* For information on generated (non-identifier) values, see the <> -* Others coming soon + +[[usage-guides]] +== Usage Guides + +Hibernate publishes multiple usage guides - + +link:{doc-quick-start-url}[Quick Start]:: + Tutorial style guide for quickly getting started with Hibernate. +link:{doc-introduction-url}[Introduction to Hibernate]:: + High-level look at the most used Hibernate features. +link:{doc-query-language-url}[Guide to the Hibernate Query Language]:: + Discussion of the Hibernate Query Language. +link:{doc-user-guide-url}[User Guide]:: + Detailed discussion of all features of Hibernate. More of a reference manual. -== Migration -* For information on how to migrate to the current Hibernate 6 version check out the link:{migrationguide_rel_link}[Migration Guide] +[[logging]] +== Logging + +The link:{doc-logging-url}[Logging Guide] discusses logging in Hibernate. +[[tooling]] == Tooling -* See the link:{userguide_rel_link}#tooling[Tooling Guide] for information on: -** Bytecode enhancement -** Static Metamodel Generation -** Gradle, Maven, and Ant plugins -** Schema management (coming soon) + +See the link:{doc-user-guide-url}#tooling[Tooling Guide] for information on: + +* Bytecode enhancement +* Static Metamodel Generation +* Gradle, Maven, and Ant plugins +* Schema management == Integrator Guides @@ -38,18 +54,45 @@ NOTE: This is still very much a work in progress. 
<> is definitely * Others coming soon -== Logging -Check out the link:{logging_rel_link}[Logging Guide] for a list of available Hibernate loggers +[[support]] +== Compatibility and Support + +Compatibility is defined as ... + +Support is defined as ... + +By default, all parts of Hibernate are considered supported and follow the general backwards compatibility https://hibernate.org/community/compatibility-policy/[policy]. +Various things influence the levels of support and backwards compatibility to expect in regard to parts of Hibernate. + +[[support-internal]] +=== Internal + +Internal parts of Hibernate are simply that - internal implementation details that are not supported for use by applications. +Use of internal contracts should be avoided as they are not supported and could be removed or changed at any time. + +Contracts considered internal are defined in one of 2 ways: + +1. The Java class (interface, etc.) is defined under a package named `internal`, either directly or as a sub-package +2. Through the use of the `@Internal` annotation. + +See the definitive set of link:{report-internals-url}[internals]. + + +[[support-incubating]] +=== Incubating + +Incubating contracts are generally new contracts which are still being actively designed, similar to +"tech preview" features. +Such contracts are subject to change and should be used with that understanding. + +See the definitive set of link:{report-incubating-url}[incubations]. +[[support-deprecation]] +=== Deprecations -== Miscellaneous -* For a list of `@Incubating` source elements, see the link:{incubating_rel_link}[incubating] list. -* For a list of `@Internal` source elements, see the link:{internal_rel_link}[internal] list. +1. `@Deprecated` +2. `@Remove` +See the definitive set of link:{report-deprecation-url}[deprecations]. -[[helping]] -== Helping -* Reporting/fixing problems. 
Report any typos or inaccurate information to the -https://hibernate.atlassian.net/browse/HHH[Hibernate ORM Jira] using the +documentation+ component. Follow the same -guidelines for contributing fixes and improvements as for contributing source code. \ No newline at end of file diff --git a/documentation/src/main/asciidoc/topical/logging/Logging.adoc b/documentation/src/main/asciidoc/topical/logging/Logging.adoc new file mode 100644 index 000000000000..4422ff99f054 --- /dev/null +++ b/documentation/src/main/asciidoc/topical/logging/Logging.adoc @@ -0,0 +1,19 @@ +:shared-attributes-dir: ../../shared/ + +include::{shared-attributes-dir}/common-attributes.adoc[] +include::{shared-attributes-dir}/url-attributes.adoc[] +include::{shared-attributes-dir}/filesystem-attributes.adoc[] +include::{shared-attributes-dir}/renderer-attributes.adoc[] + +:jboss-logging-url: + +[[root]] += Logging + +Hibernate uses the link:{jboss-logging-url}[JBoss Logging] library for its logging API. + +JBoss Logging is a logging facade, similar to SLF4J. + +[[logger-names]] +== Logger Names + diff --git a/documentation/src/main/asciidoc/userguide/Bibliography.adoc b/documentation/src/main/asciidoc/userguide/Bibliography.adoc index 60170c18c267..b958d80eebc3 100644 --- a/documentation/src/main/asciidoc/userguide/Bibliography.adoc +++ b/documentation/src/main/asciidoc/userguide/Bibliography.adoc @@ -1,6 +1,5 @@ -== References - [bibliography] +== References - [[[PoEAA]]] Martin Fowler. https://www.martinfowler.com/books/eaa.html[Patterns of Enterprise Application Architecture]. Addison-Wesley Professional. 2002. - [[[JPwH]]] Christian Bauer & Gavin King. https://www.manning.com/books/java-persistence-with-hibernate-second-edition[Java Persistence with Hibernate, Second Edition]. Manning Publications Co. 2015. 
diff --git a/documentation/src/main/asciidoc/userguide/ConfigPropertyList.adoc b/documentation/src/main/asciidoc/userguide/ConfigPropertyList.adoc deleted file mode 100644 index 0cdee70fc795..000000000000 --- a/documentation/src/main/asciidoc/userguide/ConfigPropertyList.adoc +++ /dev/null @@ -1,3 +0,0 @@ -== List of all available configuration properties - -include::../../../../target/configs.asciidoc[opts=optional] \ No newline at end of file diff --git a/documentation/src/main/asciidoc/userguide/Hibernate_User_Guide.adoc b/documentation/src/main/asciidoc/userguide/Hibernate_User_Guide.adoc index a781d8199fee..63dae70c76f3 100644 --- a/documentation/src/main/asciidoc/userguide/Hibernate_User_Guide.adoc +++ b/documentation/src/main/asciidoc/userguide/Hibernate_User_Guide.adoc @@ -1,5 +1,11 @@ -= Hibernate ORM {fullVersion} User Guide -Vlad Mihalcea, Steve Ebersole, Andrea Boriero, Gunnar Morling, Gail Badner, Chris Cranford, Emmanuel Bernard, Sanne Grinovero, Brett Meyer, Hardy Ferentschik, Gavin King, Christian Bauer, Max Rydahl Andersen, Karel Maesen, Radim Vansa, Louis Jacomet +:shared-attributes-dir: ../shared/ + +include::{shared-attributes-dir}/common-attributes.adoc[] +include::{shared-attributes-dir}/url-attributes.adoc[] +include::{shared-attributes-dir}/filesystem-attributes.adoc[] +include::{shared-attributes-dir}/renderer-attributes.adoc[] + += Hibernate ORM User Guide :toc2: :toclevels: 3 :sectanchors: @@ -33,15 +39,13 @@ include::chapters/beans/Beans.adoc[] include::chapters/portability/Portability.adoc[] include::chapters/statistics/Statistics.adoc[] include::chapters/tooling/Tooling.adoc[] - -include::appendices/Configurations.adoc[] -include::appendices/Annotations.adoc[] include::appendices/BestPractices.adoc[] -include::appendices/Legacy_Bootstrap.adoc[] -include::appendices/Legacy_DomainModel.adoc[] + +include::Credits.adoc[] + +include::appendices/SettingsReference.adoc[] include::appendices/LegacyBasicTypeResolution.adoc[] 
include::appendices/Legacy_Native_Queries.adoc[] -include::ConfigPropertyList.adoc[] include::Bibliography.adoc[] diff --git a/documentation/src/main/asciidoc/userguide/Preface.adoc b/documentation/src/main/asciidoc/userguide/Preface.adoc index 4e307c1afd3f..5c2de61d009f 100644 --- a/documentation/src/main/asciidoc/userguide/Preface.adoc +++ b/documentation/src/main/asciidoc/userguide/Preface.adoc @@ -1,3 +1,6 @@ +:shared-attributes-dir: ../shared/ +include::{shared-attributes-dir}/url-attributes.adoc[] + [[preface]] == Preface @@ -14,32 +17,31 @@ However, unlike many other persistence solutions, Hibernate does not hide the po Hibernate may not be the best solution for data-centric applications that only use stored-procedures to implement the business logic in the database, it is most useful with object-oriented domain models and business logic in the Java-based middle-tier. However, Hibernate can certainly help you to remove or encapsulate vendor-specific SQL code and will help with the common task of result set translation from a tabular representation to a graph of objects. -=== Get Involved +[[system-requirements]] +=== System Requirements -* Use Hibernate and report any bugs or issues you find. See https://hibernate.org/issuetracker[Issue Tracker] for details. -* Try your hand at fixing some bugs or implementing enhancements. Again, see https://hibernate.org/issuetracker[Issue Tracker]. -* Engage with the community using mailing lists, forums, IRC, or other ways listed in the https://hibernate.org/community[Community section]. -* Help improve or translate this documentation. Contact us on the developer mailing list if you have interest. -* Spread the word. Let the rest of your organization know about the benefits of Hibernate. +Hibernate {majorMinorVersion} requires at least Java 11 and JDBC 4.2. -== System Requirements +[[getting-started]] +=== Getting Started -Hibernate 6.0 and later versions require at least Java 11 and JDBC 4.2. 
+include::{shared-attributes-dir}/background.adoc[] -=== Getting Started Guide +[TIP] +==== +New users may want to first look at the tutorial-style link:{doc-quick-start-url}[Quick Start] guide. -New users may want to first look through the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/quickstart/html_single/[Hibernate Getting Started Guide] for basic information as well as tutorials. -There is also a series of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/topical/html_single/[topical guides] providing deep dives into various topics. +This User Guide is really more of a reference guide. +For a more high-level discussion of the most used features of Hibernate, see the link:{doc-introduction-url}[Introduction to Hibernate] guide. -[NOTE] +There is also a series of link:{doc-topical-url}[topical guides] providing deep dives into various topics such as logging, compatibility and support, etc. ==== -While having a strong background in SQL is not required to use Hibernate, it certainly helps a lot because it all boils down to SQL statements. -Probably even more important is an understanding of data modeling principles. -You might want to consider these resources as a good starting point: -* https://en.wikipedia.org/wiki/Data_modeling[Data modeling Wikipedia definition] -* https://www.agiledata.org/essays/dataModeling101.html[Data Modeling 101] -Understanding the basics of transactions and design patterns such as _Unit of Work_ (<>) or _Application Transaction_ are important as well. -These topics will be discussed in the documentation, but a prior understanding will certainly help. -==== +=== Get Involved + +* Use Hibernate and report any bugs or issues you find. See https://hibernate.org/issuetracker[Issue Tracker] for details. +* Try your hand at fixing some bugs or implementing enhancements. Again, see https://hibernate.org/issuetracker[Issue Tracker]. 
+* Engage with the community using the methods listed in the https://hibernate.org/community[Community section]. +* Help improve this documentation. Contact us on the developer mailing list or Zulip if you have interest. +* Spread the word. Let the rest of your organization know about the benefits of Hibernate. diff --git a/documentation/src/main/asciidoc/userguide/appendices/Annotations.adoc b/documentation/src/main/asciidoc/userguide/appendices/Annotations.adoc deleted file mode 100644 index 511aebdded2c..000000000000 --- a/documentation/src/main/asciidoc/userguide/appendices/Annotations.adoc +++ /dev/null @@ -1,1419 +0,0 @@ -[[annotations]] -== Mapping annotations - -[[annotations-jpa]] -=== Jakarta Persistence annotations - -[[annotations-jpa-access]] -==== `@Access` - -The {jpaJavadocUrlPrefix}Access.html[`@Access`] annotation is used to specify the access type of the associated entity class, mapped superclass, or embeddable class, or entity attribute. - -See the <> section for more info. - -[[annotations-jpa-associationoverride]] -==== `@AssociationOverride` - -The {jpaJavadocUrlPrefix}AssociationOverride.html[`@AssociationOverride`] annotation is used to override an association mapping (e.g. `@ManyToOne`, `@OneToOne`, `@OneToMany`, `@ManyToMany`) inherited from a mapped superclass or an embeddable. - -See the <> section for more info. - -[[annotations-jpa-associationoverrides]] -==== `@AssociationOverrides` - -The {jpaJavadocUrlPrefix}AssociationOverrides.html[`@AssociationOverrides`] is used to group several <> annotations. - -[[annotations-jpa-attributeoverride]] -==== `@AttributeOverride` - -The {jpaJavadocUrlPrefix}AttributeOverride.html[`@AttributeOverride`] annotation is used to override an attribute mapping inherited from a mapped superclass or an embeddable. - -See the <> section for more info. 
- -[[annotations-jpa-attributeoverrides]] -==== `@AttributeOverrides` - -The {jpaJavadocUrlPrefix}AttributeOverrides.html[`@AttributeOverrides`] is used to group several <> annotations. - -[[annotations-jpa-basic]] -==== `@Basic` - -The {jpaJavadocUrlPrefix}Basic.html[`@Basic`] annotation is used to map a basic attribute type to a database column. - -See the <> chapter for more info. - -[[annotations-jpa-cacheable]] -==== `@Cacheable` - -The {jpaJavadocUrlPrefix}Cacheable.html[`@Cacheable`] annotation is used to specify whether an entity should be stored in the second-level cache. - -If the `persistence.xml` `shared-cache-mode` XML attribute is set to `ENABLE_SELECTIVE`, then only the entities annotated with the `@Cacheable` are going to be stored in the second-level cache. - -If `shared-cache-mode` XML attribute value is `DISABLE_SELECTIVE`, then the entities marked with the `@Cacheable` annotation are not going to be stored in the second-level cache, while all the other entities are stored in the cache. - -See the <> chapter for more info. - -[[annotations-jpa-collectiontable]] -==== `@CollectionTable` - -The {jpaJavadocUrlPrefix}CollectionTable.html[`@CollectionTable`] annotation is used to specify the database table that stores the values of a basic or an embeddable type collection. - -See the <> section for more info. - -[[annotations-jpa-column]] -==== `@Column` - -The {jpaJavadocUrlPrefix}Column.html[`@Column`] annotation is used to specify the mapping between a basic entity attribute and the database table column. - -See the <> section for more info. - -[[annotations-jpa-columnresult]] -==== `@ColumnResult` - -The {jpaJavadocUrlPrefix}ColumnResult.html[`@ColumnResult`] annotation is used in conjunction with the <> or <> annotations to map a SQL column for a given SELECT query. - -See the <> section for more info. 
- -[[annotations-jpa-constructorresult]] -==== `@ConstructorResult` - -The {jpaJavadocUrlPrefix}ConstructorResult.html[`@ConstructorResult`] annotation is used in conjunction with the <> annotations to map columns of a given SELECT query to a certain object constructor. - -See the <> section for more info. - -[[annotations-jpa-convert]] -==== `@Convert` - -The {jpaJavadocUrlPrefix}Convert.html[`@Convert`] annotation is used to specify the {jpaJavadocUrlPrefix}AttributeConverter.html[`AttributeConverter`] implementation used to convert the currently annotated basic attribute. - -If the `AttributeConverter` uses {jpaJavadocUrlPrefix}Converter.html#autoApply--[`autoApply`], then all entity attributes with the same target type are going to be converted automatically. - -See the <> section for more info. - -[[annotations-jpa-converter]] -==== `@Converter` - -The {jpaJavadocUrlPrefix}Converter.html[`@Converter`] annotation is used to specify that the currently annotated {jpaJavadocUrlPrefix}AttributeConverter.html[`AttributeConverter`] implementation can be used as a Jakarta Persistence basic attribute converter. -specj -If the {jpaJavadocUrlPrefix}Converter.html#autoApply--[`autoApply`] attribute is set to `true`, then the Jakarta Persistence provider will automatically convert all basic attributes with the same Java type as defined by the current converter. - -See the <> section for more info. - -[[annotations-jpa-converts]] -==== `@Converts` - -The {jpaJavadocUrlPrefix}Converts.html[`@Converts`] annotation is used to group multiple <> annotations. - -See the <> section for more info. - -[[annotations-jpa-discriminatorcolumn]] -==== `@DiscriminatorColumn` - -The {jpaJavadocUrlPrefix}DiscriminatorColumn.html[`@DiscriminatorColumn`] annotation is used to specify the discriminator column name and the {jpaJavadocUrlPrefix}DiscriminatorColumn.html#discriminatorType--[discriminator type] for the `SINGLE_TABLE` and `JOINED` inheritance strategies. 
- -See the <> section for more info. - -[[annotations-jpa-discriminatorvalue]] -==== `@DiscriminatorValue` - -The {jpaJavadocUrlPrefix}DiscriminatorValue.html[`@DiscriminatorValue`] annotation is used to specify what value of the discriminator column is used for mapping the currently annotated entity. - -See the <> section for more info. - -[[annotations-jpa-elementcollection]] -==== `@ElementCollection` - -The {jpaJavadocUrlPrefix}ElementCollection.html[`@ElementCollection`] annotation is used to specify a collection of a basic or embeddable types. - -See the <> section for more info. - -[[annotations-jpa-embeddable]] -==== `@Embeddable` - -The {jpaJavadocUrlPrefix}Embeddable.html[`@Embeddable`] annotation is used to specify embeddable types. Like basic types, embeddable types do not have any identity, being managed by their owning entity. - -See the <> section for more info. - -[[annotations-jpa-embedded]] -==== `@Embedded` - -The {jpaJavadocUrlPrefix}Embedded.html[`@Embedded`] annotation is used to specify that a given entity attribute represents an embeddable type. - -See the <> section for more info. - -[[annotations-jpa-embeddedid]] -==== `@EmbeddedId` - -The {jpaJavadocUrlPrefix}EmbeddedId.html[`@EmbeddedId`] annotation is used to specify the entity identifier is an embeddable type. - -See the <> section for more info. - -[[annotations-jpa-entity]] -==== `@Entity` - -The {jpaJavadocUrlPrefix}Entity.html[`@Entity`] annotation is used to specify that the currently annotated class represents an entity type. -Unlike basic and embeddable types, entity types have an identity and their state is managed by the underlying Persistence Context. - -See the <> section for more info. - -[[annotations-jpa-entitylisteners]] -==== `@EntityListeners` - -The {jpaJavadocUrlPrefix}EntityListeners.html[`@EntityListeners`] annotation is used to specify an array of callback listener classes that are used by the currently annotated entity. - -See the <> section for more info. 
- -[[annotations-jpa-entityresult]] -==== `@EntityResult` - -The {jpaJavadocUrlPrefix}EntityResult.html[`@EntityResult`] annotation is used with the <> annotation to map the selected columns to an entity. - -See the <> section for more info. - -[[annotations-jpa-enumerated]] -==== `@Enumerated` - -The {jpaJavadocUrlPrefix}Enumerated.html[`@Enumerated`] annotation is used to specify that an entity attribute represents an enumerated type. - -See the <> section for more info. - -[[annotations-jpa-excludedefaultlisteners]] -==== `@ExcludeDefaultListeners` - -The {jpaJavadocUrlPrefix}ExcludeDefaultListeners.html[`@ExcludeDefaultListeners`] annotation is used to specify that the currently annotated entity skips the invocation of any default listener. - -See the <> section for more info. - -[[annotations-jpa-excludesuperclasslisteners]] -==== `@ExcludeSuperclassListeners` - -The {jpaJavadocUrlPrefix}ExcludeSuperclassListeners.html[`@ExcludeSuperclassListeners`] annotation is used to specify that the currently annotated entity skips the invocation of listeners declared by its superclass. - -See the <> section for more info. - -[[annotations-jpa-fieldresult]] -==== `@FieldResult` - -The {jpaJavadocUrlPrefix}FieldResult.html[`@FieldResult`] annotation is used with the <> annotation to map the selected columns to the fields of some specific entity. - -See the <> section for more info. - -[[annotations-jpa-foreignkey]] -==== `@ForeignKey` - -The {jpaJavadocUrlPrefix}ForeignKey.html[`@ForeignKey`] annotation is used to specify the associated foreign key of a <> mapping. -The `@ForeignKey` annotation is only used if the automated schema generation tool is enabled, in which case, it allows you to customize the underlying foreign key definition. - -See the <> section for more info. 
- -[[annotations-jpa-generatedvalue]] -==== `@GeneratedValue` - -The {jpaJavadocUrlPrefix}GeneratedValue.html[`@GeneratedValue`] annotation specifies that the entity identifier value is automatically generated using an identity column, a database sequence, or a table generator. -Hibernate supports the `@GeneratedValue` mapping even for `UUID` identifiers. - -See the <> section for more info. - -[[annotations-jpa-id]] -==== `@Id` - -The {jpaJavadocUrlPrefix}Id.html[`@Id`] annotation specifies the entity identifier. -An entity must always have an identifier attribute which is used when loading the entity in a given Persistence Context. - -See the <> section for more info. - -[[annotations-jpa-idclass]] -==== `@IdClass` - -The {jpaJavadocUrlPrefix}IdClass.html[`@IdClass`] annotation is used if the current entity defined a composite identifier. -A separate class encapsulates all the identifier attributes, which are mirrored by the current entity mapping. - -See the <> section for more info. - -[[annotations-jpa-index]] -==== `@Index` - -The {jpaJavadocUrlPrefix}Index.html[`@Index`] annotation is used by the automated schema generation tool to create a database index. - -See the <> chapter for more info. - -[[annotations-jpa-inheritance]] -==== `@Inheritance` - -The {jpaJavadocUrlPrefix}Inheritance.html[`@Inheritance`] annotation is used to specify the inheritance strategy of a given entity class hierarchy. - -See the <> section for more info. - -[[annotations-jpa-joincolumn]] -==== `@JoinColumn` - -The {jpaJavadocUrlPrefix}JoinColumn.html[`@JoinColumn`] annotation is used to specify the FOREIGN KEY column used when joining an entity association or an embeddable collection. - -See the <> section for more info. 
- -[[annotations-jpa-joincolumns]] -==== `@JoinColumns` - -The {jpaJavadocUrlPrefix}JoinColumns.html[`@JoinColumns`] annotation is used to group multiple <> annotations, which are used when mapping entity association or an embeddable collection using a composite identifier. - -[[annotations-jpa-jointable]] -==== `@JoinTable` - -The {jpaJavadocUrlPrefix}JoinTable.html[`@JoinTable`] annotation is used to specify the link table between two other database tables. - -See the <> section for more info. - -[[annotations-jpa-lob]] -==== `@Lob` - -The {jpaJavadocUrlPrefix}Lob.html[`@Lob`] annotation is used to specify that the currently annotated entity attribute represents a large object type. - -See the <> section for more info. - -[[annotations-jpa-manytomany]] -==== `@ManyToMany` - -The {jpaJavadocUrlPrefix}ManyToMany.html[`@ManyToMany`] annotation is used to specify a many-to-many database relationship. - -See the <> section for more info. - -[[annotations-jpa-manytoone]] -==== `@ManyToOne` - -The {jpaJavadocUrlPrefix}ManyToOne.html[`@ManyToOne`] annotation is used to specify a many-to-one database relationship. - -See the <> section for more info. - -[[annotations-jpa-mapkey]] -==== `@MapKey` - -The {jpaJavadocUrlPrefix}MapKey.html[`@MapKey`] annotation is used to specify the key of a `java.util.Map` association for which the key type is either the primary key or an attribute of the entity which represents the value of the map. - -See the <> section for more info. - -[[annotations-jpa-mapkeyclass]] -==== `@MapKeyClass` - -The {jpaJavadocUrlPrefix}MapKeyClass.html[`@MapKeyClass`] annotation is used to specify the type of the map key of a `java.util.Map` associations. - -See the <> section for more info. - -[[annotations-jpa-mapkeycolumn]] -==== `@MapKeyColumn` - -The {jpaJavadocUrlPrefix}MapKeyColumn.html[`@MapKeyColumn`] annotation is used to specify the database column which stores the key of a `java.util.Map` association for which the map key is a basic type. 
- -See the <> for an example of `@MapKeyColumn` annotation usage. - -[[annotations-jpa-mapkeyenumerated]] -==== `@MapKeyEnumerated` - -The {jpaJavadocUrlPrefix}MapKeyEnumerated.html[`@MapKeyEnumerated`] annotation is used to specify that the key of `java.util.Map` association is a Java Enum. - -See the <> section for more info. - -[[annotations-jpa-mapkeyjoincolumn]] -==== `@MapKeyJoinColumn` - -The {jpaJavadocUrlPrefix}MapKeyJoinColumn.html[`@MapKeyJoinColumn`] annotation is used to specify that the key of `java.util.Map` association is an entity association. -The map key column is a FOREIGN KEY in a link table that also joins the `Map` owner's table with the table where the `Map` value resides. - -See the <> section for more info. - -[[annotations-jpa-mapkeyjoincolumns]] -==== `@MapKeyJoinColumns` - -The {jpaJavadocUrlPrefix}MapKeyJoinColumns.html[`@MapKeyJoinColumns`] annotation is used to group several <> mappings when the `java.util.Map` association key uses a composite identifier. - -[[annotations-jpa-mapkeytemporal]] -==== `@MapKeyTemporal` - -The {jpaJavadocUrlPrefix}MapKeyTemporal.html[`@MapKeyTemporal`] annotation is used to specify that the key of `java.util.Map` association is a {jpaJavadocUrlPrefix}TemporalType.html[`@TemporalType`] (e.g. `DATE`, `TIME`, `TIMESTAMP`). - -See the <> section for more info. - -[[annotations-jpa-mappedsuperclass]] -==== `@MappedSuperclass` - -The {jpaJavadocUrlPrefix}MappedSuperclass.html[`@MappedSuperclass`] annotation is used to specify that the currently annotated type attributes are inherited by any subclass entity. - -See the <> section for more info. - -[[annotations-jpa-mapsid]] -==== `@MapsId` - -The {jpaJavadocUrlPrefix}MapsId.html[`@MapsId`] annotation is used to specify that the entity identifier is mapped by the currently annotated `@ManyToOne` or `@OneToOne` association. - -See the <> section for more info. 
- -[[annotations-jpa-namedattributenode]] -==== `@NamedAttributeNode` - -The {jpaJavadocUrlPrefix}NamedAttributeNode.html[`@NamedAttributeNode`] annotation is used to specify each individual attribute node that needs to be fetched by an Entity Graph. - -See the <> section for more info. - -[[annotations-jpa-namedentitygraph]] -==== `@NamedEntityGraph` - -The {jpaJavadocUrlPrefix}NamedEntityGraph.html[`@NamedEntityGraph`] annotation is used to specify an Entity Graph that can be used by an entity query to override the default fetch plan. - -See the <> section for more info. - -[[annotations-jpa-namedentitygraphs]] -==== `@NamedEntityGraphs` - -The {jpaJavadocUrlPrefix}NamedEntityGraphs.html[`@NamedEntityGraphs`] annotation is used to group multiple <> annotations. - -[[annotations-jpa-namednativequeries]] -==== `@NamedNativeQueries` - -The {jpaJavadocUrlPrefix}NamedNativeQueries.html[`@NamedNativeQueries`] annotation is used to group multiple <> annotations. - -See the <> section for more info. - -[[annotations-jpa-namednativequery]] -==== `@NamedNativeQuery` - -The {jpaJavadocUrlPrefix}NamedNativeQuery.html[`@NamedNativeQuery`] annotation is used to specify a native SQL query that can be retrieved later by its name. - -See the <> section for more info. - -[[annotations-jpa-namedqueries]] -==== `@NamedQueries` - -The {jpaJavadocUrlPrefix}NamedQueries.html[`@NamedQueries`] annotation is used to group multiple <> annotations. - -[[annotations-jpa-namedquery]] -==== `@NamedQuery` - -The {jpaJavadocUrlPrefix}NamedQuery.html[`@NamedQuery`] annotation is used to specify a JPQL query that can be retrieved later by its name. - -See the <> section for more info. - -[[annotations-jpa-namedstoredprocedurequeries]] -==== `@NamedStoredProcedureQueries` - -The {jpaJavadocUrlPrefix}NamedStoredProcedureQueries.html[`@NamedStoredProcedureQueries`] annotation is used to group multiple <> annotations. 
- -[[annotations-jpa-namedstoredprocedurequery]] -==== `@NamedStoredProcedureQuery` - -The {jpaJavadocUrlPrefix}NamedStoredProcedureQuery.html[`@NamedStoredProcedureQuery`] annotation is used to specify a stored procedure query that can be retrieved later by its name. - -See the <> section for more info. - -[[annotations-jpa-namedsubgraph]] -==== `@NamedSubgraph` - -The {jpaJavadocUrlPrefix}NamedSubgraph.html[`@NamedSubgraph`] annotation used to specify a subgraph in an Entity Graph. - -See the <> section for more info. - -[[annotations-jpa-onetomany]] -==== `@OneToMany` - -The {jpaJavadocUrlPrefix}OneToMany.html[`@OneToMany`] annotation is used to specify a one-to-many database relationship. - -See the <> section for more info. - -[[annotations-jpa-onetoone]] -==== `@OneToOne` - -The {jpaJavadocUrlPrefix}OneToOne.html[`@OneToOne`] annotation is used to specify a one-to-one database relationship. - -See the <> section for more info. - -[[annotations-jpa-orderby]] -==== `@OrderBy` - -The {jpaJavadocUrlPrefix}OrderBy.html[`@OrderBy`] annotation is used to specify the entity attributes used for sorting when fetching the currently annotated collection. - -See the <> section for more info. - -[[annotations-jpa-ordercolumn]] -==== `@OrderColumn` - -The {jpaJavadocUrlPrefix}OrderColumn.html[`@OrderColumn`] annotation is used to specify that the current annotation collection order should be materialized in the database. - -See the <> section for more info. - -[[annotations-jpa-persistencecontext]] -==== `@PersistenceContext` - -The {jpaJavadocUrlPrefix}PersistenceContext.html[`@PersistenceContext`] annotation is used to specify the `EntityManager` that needs to be injected as a dependency. - -See the <> section for more info. - -[[annotations-jpa-persistencecontexts]] -==== `@PersistenceContexts` - -The {jpaJavadocUrlPrefix}PersistenceContexts.html[`@PersistenceContexts`] annotation is used to group multiple <> annotations. 
- -[[annotations-jpa-persistenceproperty]] -==== `@PersistenceProperty` - -The {jpaJavadocUrlPrefix}PersistenceProperty.html[`@PersistenceProperty`] annotation is used by the <> annotation to declare Jakarta Persistence provider properties that are passed to the underlying container when the `EntityManager` instance is created. - -See the <> section for more info. - -[[annotations-jpa-persistenceunit]] -==== `@PersistenceUnit` - -The {jpaJavadocUrlPrefix}PersistenceUnit.html[`@PersistenceUnit`] annotation is used to specify the `EntityManagerFactory` that needs to be injected as a dependency. - -See the <> section for more info. - -[[annotations-jpa-persistenceunits]] -==== `@PersistenceUnits` - -The {jpaJavadocUrlPrefix}PersistenceUnits.html[`@PersistenceUnits`] annotation is used to group multiple <> annotations. - -[[annotations-jpa-postload]] -==== `@PostLoad` - -The {jpaJavadocUrlPrefix}PostLoad.html[`@PostLoad`] annotation is used to specify a callback method that fires after an entity is loaded. - -See the <> section for more info. - -[[annotations-jpa-postpersist]] -==== `@PostPersist` - -The {jpaJavadocUrlPrefix}PostPersist.html[`@PostPersist`] annotation is used to specify a callback method that fires after an entity is persisted. - -See the <> section for more info. - -[[annotations-jpa-postremove]] -==== `@PostRemove` - -The {jpaJavadocUrlPrefix}PostRemove.html[`@PostRemove`] annotation is used to specify a callback method that fires after an entity is removed. - -See the <> section for more info. - -[[annotations-jpa-postupdate]] -==== `@PostUpdate` - -The {jpaJavadocUrlPrefix}PostUpdate.html[`@PostUpdate`] annotation is used to specify a callback method that fires after an entity is updated. - -See the <> section for more info. - -[[annotations-jpa-prepersist]] -==== `@PrePersist` - -The {jpaJavadocUrlPrefix}PrePersist.html[`@PrePersist`] annotation is used to specify a callback method that fires before an entity is persisted. 
- -See the <> section for more info. - -[[annotations-jpa-preremove]] -==== `@PreRemove` - -The {jpaJavadocUrlPrefix}PreRemove.html[`@PreRemove`] annotation is used to specify a callback method that fires before an entity is removed. - -See the <> section for more info. - -[[annotations-jpa-preupdate]] -==== `@PreUpdate` - -The {jpaJavadocUrlPrefix}PreUpdate.html[`@PreUpdate`] annotation is used to specify a callback method that fires before an entity is updated. - -See the <> section for more info. - -[[annotations-jpa-primarykeyjoincolumn]] -==== `@PrimaryKeyJoinColumn` - -The {jpaJavadocUrlPrefix}PrimaryKeyJoinColumn.html[`@PrimaryKeyJoinColumn`] annotation is used to specify that the primary key column of the currently annotated entity is also a foreign key to some other entity -(e.g. a base class table in a `JOINED` inheritance strategy, the primary table in a secondary table mapping, or the parent table in a `@OneToOne` relationship). - -See the <> section for more info. - -[[annotations-jpa-primarykeyjoincolumns]] -==== `@PrimaryKeyJoinColumns` - -The {jpaJavadocUrlPrefix}PrimaryKeyJoinColumns.html[`@PrimaryKeyJoinColumns`] annotation is used to group multiple <> annotations. - -[[annotations-jpa-queryhint]] -==== `@QueryHint` - -The {jpaJavadocUrlPrefix}QueryHint.html[`@QueryHint`] annotation is used to specify a Jakarta Persistence provider hint used by a `@NamedQuery` or a `@NamedNativeQuery` annotation. - -See the <> section for more info. - -[[annotations-jpa-secondarytable]] -==== `@SecondaryTable` - -The {jpaJavadocUrlPrefix}SecondaryTable.html[`@SecondaryTable`] annotation is used to specify a secondary table for the currently annotated entity. - -See the <> section for more info. - -[[annotations-jpa-secondarytables]] -==== `@SecondaryTables` - -The {jpaJavadocUrlPrefix}SecondaryTables.html[`@SecondaryTables`] annotation is used to group multiple <> annotations. 
- -[[annotations-jpa-sequencegenerator]] -==== `@SequenceGenerator` - -The {jpaJavadocUrlPrefix}SequenceGenerator.html[`@SequenceGenerator`] annotation is used to specify the database sequence used by the identifier generator of the currently annotated entity. - -See the <> section for more info. - -[[annotations-jpa-sqlresultsetmapping]] -==== `@SqlResultSetMapping` - -The {jpaJavadocUrlPrefix}SqlResultSetMapping.html[`@SqlResultSetMapping`] annotation is used to specify the `ResultSet` mapping of a native SQL query or stored procedure. - -See the <> section for more info. - -[[annotations-jpa-sqlresultsetmappings]] -==== `@SqlResultSetMappings` - -The {jpaJavadocUrlPrefix}SqlResultSetMappings.html[`@SqlResultSetMappings`] annotation is group multiple <> annotations. - -[[annotations-jpa-storedprocedureparameter]] -==== `@StoredProcedureParameter` - -The {jpaJavadocUrlPrefix}StoredProcedureParameter.html[`@StoredProcedureParameter`] annotation is used to specify a parameter of a <>. - -See the <> section for more info. - -[[annotations-jpa-table]] -==== `@Table` - -The {jpaJavadocUrlPrefix}Table.html[`@Table`] annotation is used to specify the primary table of the currently annotated entity. - -See the <> section for more info. - -[[annotations-jpa-tablegenerator]] -==== `@TableGenerator` - -The {jpaJavadocUrlPrefix}TableGenerator.html[`@TableGenerator`] annotation is used to specify the database table used by the identity generator of the currently annotated entity. - -See the <> section for more info. - -[[annotations-jpa-temporal]] -==== `@Temporal` - -The {jpaJavadocUrlPrefix}Temporal.html[`@Temporal`] annotation is used to specify the `TemporalType` of the currently annotated `java.util.Date` or `java.util.Calendar` entity attribute. - -See the <> chapter for more info. 
- -[[annotations-jpa-transient]] -==== `@Transient` - -The {jpaJavadocUrlPrefix}Transient.html[`@Transient`] annotation is used to specify that a given entity attribute should not be persisted. - -See the <> section for more info. - -[[annotations-jpa-uniqueconstraint]] -==== `@UniqueConstraint` - -The {jpaJavadocUrlPrefix}UniqueConstraint.html[`@UniqueConstraint`] annotation is used to specify a unique constraint to be included by the automated schema generator for the primary or secondary table associated with the currently annotated entity. - -See the <> chapter for more info. - -[[annotations-jpa-version]] -==== `@Version` - -The {jpaJavadocUrlPrefix}Version.html[`@Version`] annotation is used to specify the version attribute used for optimistic locking. - -See the <> section for more info. - -[[annotations-hibernate]] -=== Hibernate annotations - -[[annotations-hibernate-any]] -==== `@Any` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Any.html[`@Any`] annotation is -used to define the *any-to-one* association which can point to one of several entity types. - -See the <> section for more info. - -[[annotations-hibernate-anydiscriminator]] -==== `@AnyDiscriminator` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AnyDiscriminator.html[`@AnyDiscriminator`] -annotation is used to provide details about the discriminator portion of an `@Any` or `@ManyToAny` mapping. - -See the <> section for more info. - -[[annotations-hibernate-anydiscriminatorvalue]] -==== `@AnyDiscriminatorValue` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AnyDiscriminatorValue.html[`@AnyDiscriminatorValue`] -annotation maps a single discriminator value to its corresponding entity - -See the <> section for more info. 
- -[[annotations-hibernate-anydiscriminatorvalues]] -==== `@AnyDiscriminatorValues` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AnyDiscriminatorValues.html[`@AnyDiscriminatorValues`] -annotation groups multiple <> annotations. - -See the <> section for more info. - -[[annotations-hibernate-anykeyjavaclass]] -==== `@AnyKeyJavaClass` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AnyKeyJavaClass.html[`@AnyKeyJavaClass`] -annotation specifies the Java Class to use for the foreign-key of an ANY mapping - -See the <> section for more info. - -[[annotations-hibernate-anykeyjavatype]] -==== `@AnyKeyJavaType` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AnyKeyJavaType.html[`@AnyKeyJavaType`] -annotation specifies a specific JavaType descriptor to use for the foreign-key of an ANY mapping - -See the <> section for more info. - - -[[annotations-hibernate-anykeyjdbctype]] -==== `@AnyKeyJdbcType` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AnyKeyJdbcType.html[`@AnyKeyJdbcType`] -annotation specifies a specific JdbcType descriptor to use for the foreign-key of an ANY mapping - -See the <> section for more info. - -[[annotations-hibernate-anykeyjdbctypecode]] -==== `@AnyKeyJdbcTypeCode` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AnyKeyJdbcTypeCode.html[`@AnyKeyJdbcTypeCode`] -annotation specifies a "type code" indicating which JdbcType descriptor to use for the foreign-key of an ANY mapping - -See the <> section for more info. 
- - -[[annotations-hibernate-attributeaccessor]] -==== `@AttributeAccessor` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AttributeAccessor.html[`@AttributeAccessor`] annotation is used to specify a custom https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/property/access/spi/PropertyAccessStrategy.html[`PropertyAccessStrategy`]. - -Should only be used to name a custom `PropertyAccessStrategy`. -For property/field access type, the Jakarta Persistence <> annotation should be preferred. - -However, if this annotation is used with either value="property" or value="field", it will act just as the corresponding usage of the Jakarta Persistence <> annotation. - - -[[annotations-hibernate-attributebindertype]] -==== `@AttributeBinderType` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/AttributeBinderType.html[`@AttributeBinderType`] -annotation is a meta-annotation used to annotate a custom annotation type used to drive customized model binding. - -See <>. - - -[[annotations-hibernate-batchsize]] -==== `@BatchSize` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/BatchSize.html[`@BatchSize`] annotation is used to specify the size for batch loading the entries of a lazy collection. - -See the <> section for more info. - -[[annotations-hibernate-cache]] -==== `@Cache` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Cache.html[`@Cache`] annotation is used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CacheConcurrencyStrategy.html[`CacheConcurrencyStrategy`] of a root entity or a collection. - -See the <> chapter for more info. 
- -[[annotations-hibernate-cascade]] -==== `@Cascade` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Cascade.html[`@Cascade`] annotation is used to apply the Hibernate specific https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CascadeType.html[`CascadeType`] strategies (e.g. `CascadeType.LOCK`, `CascadeType.SAVE_UPDATE`, `CascadeType.REPLICATE`) on a given association. - -For Jakarta Persistence cascading, prefer using the {jpaJavadocUrlPrefix}CascadeType.html[`jakarta.persistence.CascadeType`] instead. - -When combining both Jakarta Persistence and Hibernate `CascadeType` strategies, Hibernate will merge both sets of cascades. - -See the <> chapter for more info. - -[[annotations-hibernate-check]] -==== `@Check` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Check.html[`@Check`] annotation is used to specify an arbitrary SQL CHECK constraint which can be defined at the class level. - -See the <> chapter for more info. - -[[annotations-hibernate-collectionid]] -==== `@CollectionId` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CollectionId.html[`@CollectionId`] annotation is used to specify an identifier column for an idbag collection. - -You might want to use the Jakarta Persistence <> instead. - -[[annotations-hibernate-collectiontype]] -==== `@CollectionType` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CollectionType.html[`@CollectionType`] annotation is used to specify a custom collection type. - -The collection can also name a <>, which defines the Hibernate Type of the collection elements. - -See the <> chapter for more info. 
- -[[annotations-hibernate-columndefault]] -==== `@ColumnDefault` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ColumnDefault.html[`@ColumnDefault`] annotation is used to specify the `DEFAULT` DDL value to apply when using the automated schema generator. - -The same behavior can be achieved using the `definition` attribute of the Jakarta Persistence <> annotation. - -See the <> chapter for more info. - -[[annotations-hibernate-columns]] -==== `@Columns` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Columns.html[`@Columns`] annotation is used to group multiple Jakarta Persistence <> annotations. - -See the <> section for more info. - -[[annotations-hibernate-columntransformer]] -==== `@ColumnTransformer` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ColumnTransformer.html[`@ColumnTransformer`] annotation is used to customize how a given column value is read from or written into the database. - -See the <> section for more info. - -[[annotations-hibernate-columntransformers]] -==== `@ColumnTransformers` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ColumnTransformers.html[`@ColumnTransformers`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-creationtimestamp]] -==== `@CreationTimestamp` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CreationTimestamp.html[`@CreationTimestamp`] annotation is used to specify that the currently annotated temporal type must be initialized with the current JVM timestamp value. 
- -The supported property types are: - -- `java.util.Date` -- `java.util.Calendar` -- `java.sql.Date` -- `java.sql.Time` -- `java.sql.Timestamp` -- `java.time.Instant` -- `java.time.LocalDate` -- `java.time.LocalDateTime` -- `java.time.LocalTime` -- `java.time.MonthDay` -- `java.time.OffsetDateTime` -- `java.time.OffsetTime` -- `java.time.Year` -- `java.time.YearMonth` -- `java.time.ZonedDateTime` - -See the <> section for more info. - -[[annotations-hibernate-discriminatorformula]] -==== `@DiscriminatorFormula` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/DiscriminatorFormula.html[`@DiscriminatorFormula`] annotation is used to specify a Hibernate <> to resolve the inheritance discriminator value. - -See the <> section for more info. - -[[annotations-hibernate-discriminatoroptions]] -==== `@DiscriminatorOptions` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/DiscriminatorOptions.html[`@DiscriminatorOptions`] annotation is used to provide the `force` and `insert` Discriminator properties. - -See the <> section for more info. - -[[annotations-hibernate-dynamicinsert]] -==== `@DynamicInsert` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/DynamicInsert.html[`@DynamicInsert`] annotation is used to specify that the `INSERT` SQL statement should be generated whenever an entity is to be persisted. - -By default, Hibernate uses a cached `INSERT` statement that sets all table columns. -When the entity is annotated with the `@DynamicInsert` annotation, the `PreparedStatement` is going to include only the non-null columns. - -See the <> section for more info on how `@DynamicInsert` works. 
- -[[annotations-hibernate-dynamicupdate]] -==== `@DynamicUpdate` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/DynamicUpdate.html[`@DynamicUpdate`] annotation is used to specify that the `UPDATE` SQL statement should be generated whenever an entity is modified. - -By default, Hibernate uses a cached `UPDATE` statement that sets all table columns. -When the entity is annotated with the `@DynamicUpdate` annotation, the `PreparedStatement` is going to include only the columns whose values have been changed. - -See the <> section for more info. - -[NOTE] -==== -For reattachment of detached entities, the dynamic update is not possible without having the <> annotation as well. -==== - -[[annotations-hibernate-fetch]] -==== `@Fetch` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Fetch.html[`@Fetch`] annotation is used to specify the Hibernate specific https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FetchMode.html[`FetchMode`] (e.g. `JOIN`, `SELECT`, `SUBSELECT`) used for the currently annotated association. - -See the <> section for more info. - -[[annotations-hibernate-fetchprofile]] -==== `@FetchProfile` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FetchProfile.html[`@FetchProfile`] annotation is used to specify a custom fetching profile, similar to a Jakarta Persistence Entity Graph. - -See the <> section for more info. - -[[annotations-hibernate-fetchprofile-fetchoverride]] -==== `@FetchProfile.FetchOverride` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FetchProfile.FetchOverride.html[`@FetchProfile.FetchOverride`] annotation is used in conjunction with the <> annotation, -and it's used for overriding the fetching strategy of a particular entity association. - -See the <> section for more info. 
- -[[annotations-hibernate-fetchprofiles]] -==== `@FetchProfiles` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FetchProfiles.html[`@FetchProfiles`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-filter]] -==== `@Filter` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Filter.html[`@Filter`] annotation is used to add filters to an entity or the target entity of a collection. - -See the <> section for more info. - -[[annotations-hibernate-filterdef]] -==== `@FilterDef` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FilterDef.html[`@FilterDef`] annotation is used to specify a `@Filter` definition (name, default condition and parameter types, if any). - -See the <> section for more info. - -[[annotations-hibernate-filterdefs]] -==== `@FilterDefs` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FilterDefs.html[`@FilterDefs`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-filterjointable]] -==== `@FilterJoinTable` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FilterJoinTable.html[`@FilterJoinTable`] annotation is used to add `@Filter` capabilities to a join table collection. - -See the <> section for more info. - -[[annotations-hibernate-filterjointables]] -==== `@FilterJoinTables` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/FilterJoinTables.html[`@FilterJoinTables`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-filters]] -==== `@Filters` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Filters.html[`@Filters`] annotation is used to group multiple <> annotations. 
- -==== [line-through]#`@ForeignKey`# - - -[[annotations-hibernate-formula]] -==== `@Formula` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Formula.html[`@Formula`] annotation is used to specify an SQL fragment that is executed in order to populate a given entity attribute. - -See the <> section for more info. - -[[annotations-hibernate-generated]] -==== `@Generated` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Generated.html[`@Generated`] annotation is used to specify that the currently annotated entity attribute is generated by the database. - -See the <> section for more info. - -[[annotations-hibernate-generatedcolumn]] -==== `@GeneratedColumn` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/GeneratedColumn.html[`@GeneratedColumn`] annotation is used to specify that an entity attribute is generated by the database using `GENERATED ALWAYS AS` DDL. - -[[annotations-hibernate-generatortype]] -==== `@GeneratorType` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/GeneratorType.html[`@GeneratorType`] annotation is used to provide a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tuple/ValueGenerator.html[`ValueGenerator`] -and a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/GenerationTime.html[`GenerationTime`] for the currently annotated generated attribute. - -See the <> section for more info. - -[[annotations-hibernate-genericgenerator]] -==== `@GenericGenerator` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/GenericGenerator.html[`@GenericGenerator`] annotation can be used to configure any Hibernate identifier generator. - -See the <> section for more info. 
- -[[annotations-hibernate-genericgenerators]] -==== `@GenericGenerators` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/GenericGenerators.html[`@GenericGenerators`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-immutable]] -==== `@Immutable` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Immutable.html[`@Immutable`] annotation is used to specify that the annotated entity, attribute, or collection is immutable. - -See the <> section for more info. - -[[annotations-hibernate-index]] -==== [line-through]#`@Index`# - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Index.html[[line-through]#`@Index`#] annotation is deprecated. Use the Jakarta Persistence <> annotation instead. - -[[annotations-hibernate-indexcolumn]] -==== [line-through]#`@IndexColumn`# - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/IndexColumn.html[[line-through]#`@IndexColumn`#] annotation is deprecated. Use the Jakarta Persistence <> annotation instead. - -[[annotations-hibernate-joincolumnorformula]] -==== `@JoinColumnOrFormula` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/JoinColumnOrFormula.html[`@JoinColumnOrFormula`] annotation is used to specify that the entity association is resolved either through a FOREIGN KEY join (e.g. <>) or using the result of a given SQL formula (e.g. <>). - -See the <> section for more info. - -[[annotations-hibernate-joincolumnsorformulas]] -==== `@JoinColumnsOrFormulas` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/JoinColumnsOrFormulas.html[`@JoinColumnsOrFormulas`] annotation is used to group multiple <> annotations. 
-[[annotations-hibernate-joinformula]] -==== `@JoinFormula` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/JoinFormula.html[`@JoinFormula`] annotation is used as a replacement for <> when the association does not have a dedicated FOREIGN KEY column. - -See the <> section for more info. - -[[annotations-hibernate-lazycollection]] -==== `@LazyCollection` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Index.html[[line-through]#`@LazyCollection`#] annotation is deprecated. - -[[annotations-hibernate-lazygroup]] -==== `@LazyGroup` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/LazyGroup.html[`@LazyGroup`] annotation is used to specify that an entity attribute should be fetched along with all the other attributes belonging to the same group. - -To load entity attributes lazily, bytecode enhancement is needed. -By default, all non-collection attributes are loaded in one group named "DEFAULT". - -This annotation allows defining different groups of attributes to be initialized together when accessing one attribute in the group. - -See the <> section for more info. - -[[annotations-hibernate-lazytoone]] -==== `@LazyToOne` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Index.html[[line-through]#`@LazyToOne`#] annotation is deprecated. - -[[annotations-hibernate-listindexbase]] -==== `@ListIndexBase` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ListIndexBase.html[`@ListIndexBase`] annotation is used to specify the start value for a list index, as stored in the database. - -By default, `List` indexes are stored starting at zero. Generally used in conjunction with <>. - -See the <> section for more info. 
- -[[annotations-hibernate-loader]] -==== `@Loader` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Loader.html[`@Loader`] annotation is used to override the default `SELECT` query used for loading an entity. - -See the <> section for more info. - -[[annotations-hibernate-manytoany]] -==== `@ManyToAny` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ManyToAny.html[`@ManyToAny`] annotation is used to specify a many-to-one association when the target type is dynamically resolved. - -See the <> section for more info. - -[[annotations-hibernate-mapkeytype]] -==== `@MapKeyType` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/MapKeyType.html[`@MapKeyType`] annotation is used to specify the map key type. - -See the <> section for more info. - -[[annotations-hibernate-metavalue]] -==== `@MetaValue` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/MetaValue.html[`@MetaValue`] annotation is used by the <> annotation to specify the association between a given discriminator value and an entity type. - -See the <> section for more info. - -[[annotations-hibernate-namednativequeries]] -==== `@NamedNativeQueries` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NamedNativeQueries.html[`@NamedNativeQueries`] annotation is used to group multiple <> annotations. 
- -[[annotations-hibernate-namednativequery]] -==== `@NamedNativeQuery` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NamedNativeQuery.html[`@NamedNativeQuery`] annotation extends the Jakarta Persistence <> with Hibernate specific features, like: - -- flush mode for this particular query -- if the query should be cached, and which cache region should be used -- the selected entity `CacheModeType` strategy -- the JDBC `Statement` fetch size -- the JDBC `Statement` execution timeout -- if the query is a `CallableStatement`, targeting a stored procedure or a database function -- what SQL-level comment should be sent to the database -- if the query is read-only, hence it does not store the resulted entities into the currently running Persistence Context - -See the <> section for more info. - -[[annotations-hibernate-namedqueries]] -==== `@NamedQueries` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NamedQueries.html[`@NamedQueries`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-namedquery]] -==== `@NamedQuery` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NamedQuery.html[`@NamedQuery`] annotation extends the Jakarta Persistence <> with Hibernate specific features, like: - -- flush mode for this particular query -- if the query should be cached, and which cache region should be used -- the selected entity `CacheModeType` strategy -- the JDBC `Statement` fetch size -- the JDBC `Statement` execution timeout -- if the query is a `CallableStatement`, targeting a stored procedure or a database function -- what SQL-level comment should be sent to the database -- if the query is read-only, hence it does not store the resulted entities into the currently running Persistence Context - -See the <> section for more info. 
- -[[annotations-hibernate-nationalized]] -==== `@Nationalized` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Nationalized.html[`@Nationalized`] annotation is used to specify that the currently annotated attribute is a character type (e.g. `String`, `Character`, `Clob`) that is stored in a nationalized column type (`NVARCHAR`, `NCHAR`, `NCLOB`). - -See the <> section for more info. - -[[annotations-hibernate-naturalid]] -==== `@NaturalId` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NaturalId.html[`@NaturalId`] annotation is used to specify that the currently annotated attribute is part of the natural id of the entity. - -See the <> section for more info. - -[[annotations-hibernate-naturalidcache]] -==== `@NaturalIdCache` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NaturalIdCache.html[`@NaturalIdCache`] annotation is used to specify that the natural id values associated with the annotated entity should be stored in the second-level cache. - -See the <> section for more info. - -[[annotations-hibernate-notfound]] -==== `@NotFound` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NotFound.html[`@NotFound`] annotation is used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/NotFoundAction.html[`NotFoundAction`] strategy for when an element is not found in a given association. - -The `NotFoundAction` defines two possibilities: - -`EXCEPTION`:: An exception is thrown when an element is not found (default and recommended). -`IGNORE`:: Ignore the element when not found in the database. - -See the <> section for more info. 
- -[[annotations-hibernate-ondelete]] -==== `@OnDelete` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OnDelete.html[`@OnDelete`] annotation is used to specify the delete strategy employed by the currently annotated collection, array or joined subclasses. -This annotation is used by the automated schema generation tool to generate the appropriate FOREIGN KEY DDL cascade directive. - -The two possible strategies are defined by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OnDeleteAction.html[`OnDeleteAction`] enumeration: - -CASCADE:: Use the database FOREIGN KEY cascade capabilities. -NO_ACTION:: Take no action. - -See the <> chapter for more info. - -[[annotations-hibernate-optimisticlock]] -==== `@OptimisticLock` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OptimisticLock.html[`@OptimisticLock`] annotation is used to specify if the currently annotated attribute will trigger an entity version increment upon being modified. - -See the <> section for more info. - -[[annotations-hibernate-optimisticlocking]] -==== `@OptimisticLocking` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OptimisticLocking.html[`@OptimisticLocking`] annotation is used to specify the currently annotated entity's optimistic locking strategy. - -The four possible strategies are defined by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OptimisticLockType.html[`OptimisticLockType`] enumeration: - -NONE:: The implicit optimistic locking mechanism is disabled. -VERSION:: The implicit optimistic locking mechanism is using a dedicated version column. -ALL:: The implicit optimistic locking mechanism is using *all* attributes as part of an expanded WHERE clause restriction for the `UPDATE` and `DELETE` SQL statements. 
-DIRTY:: The implicit optimistic locking mechanism is using the *dirty* attributes (the attributes that were modified) as part of an expanded WHERE clause restriction for the `UPDATE` and `DELETE` SQL statements. - -See the <> section for more info. - -[[annotations-hibernate-orderby]] -==== `@OrderBy` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/OrderBy.html[`@OrderBy`] annotation is used to specify a *SQL* ordering directive for sorting the currently annotated collection. - -It differs from the Jakarta Persistence <> annotation because the Jakarta Persistence annotation expects a JPQL order-by fragment, not an SQL directive. - -See the <> section for more info. - -[[annotations-hibernate-paramdef]] -==== `@ParamDef` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ParamDef.html[`@ParamDef`] annotation is used in conjunction with <> so that the Hibernate Filter can be customized with runtime-provided parameter values. - -See the <> section for more info. - -[[annotations-hibernate-parameter]] -==== `@Parameter` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Parameter.html[`@Parameter`] annotation is a generic parameter (basically a key/value combination) used to parametrize other annotations, -like <>, <>, and <>, <>. - -[[annotations-hibernate-parent]] -==== `@Parent` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Parent.html[`@Parent`] annotation is used to specify that the currently annotated embeddable attribute references back the owning entity. - -See the <> section for more info. - -[[annotations-hibernate-persister]] -==== `@Persister` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Persister.html[`@Persister`] annotation is used to specify a custom entity or collection persister. 
- -For entities, the custom persister must implement the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/persister/entity/EntityPersister.html[`EntityPersister`] interface. - -For collections, the custom persister must implement the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/persister/collection/CollectionPersister.html[`CollectionPersister`] interface. - -See the <> section for more info. - -[[annotations-hibernate-polymorphism]] -==== `@Polymorphism` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Polymorphism.html[`@Polymorphism`] annotation is used to define the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/PolymorphismType.html[`PolymorphismType`] Hibernate will apply to entity hierarchies. - -There are two possible `PolymorphismType` options: - -EXPLICIT:: The currently annotated entity is retrieved only if explicitly asked. -IMPLICIT:: The currently annotated entity is retrieved if any of its super entities are retrieved. This is the default option. - -See the <> section for more info. - -[[annotations-hibernate-proxy]] -==== `@Proxy` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Proxy.html[`@Proxy`] annotation is used to specify a custom proxy implementation for the currently annotated entity. - -See the <> section for more info. - -[[annotations-hibernate-rowid]] -==== `@RowId` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/RowId.html[`@RowId`] annotation is used to specify the database column used as a `ROWID` _pseudocolumn_. -For instance, Oracle defines the https://docs.oracle.com/cd/B19306_01/server.102/b14200/pseudocolumns008.htm[`ROWID` pseudocolumn] which provides the address of every table row. 
- -According to Oracle documentation, `ROWID` is the fastest way to access a single row from a table. - -See the <> section for more info. - -[[annotations-hibernate-selectbeforeupdate]] -==== `@SelectBeforeUpdate` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SelectBeforeUpdate.html[`@SelectBeforeUpdate`] annotation is used to specify that the currently annotated entity state be selected from the database when determining whether to perform an update when the detached entity is reattached. - -See the <> section for more info on how `@SelectBeforeUpdate` works. - -[[annotations-hibernate-sortcomparator]] -==== `@SortComparator` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SortComparator.html[`@SortComparator`] annotation is used to specify a `Comparator` for sorting the `Set`/`Map` in-memory. - -See the <> section for more info. - -[[annotations-hibernate-sortnatural]] -==== `@SortNatural` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SortNatural.html[`@SortNatural`] annotation is used to specify that the `Set`/`Map` should be sorted using natural sorting. - -See the <> section for more info. - -[[annotations-hibernate-source]] -==== `@Source` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Source.html[`@Source`] annotation is used in conjunction with a `@Version` timestamp entity attribute indicating -the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SourceType.html[`SourceType`] of the timestamp value. - -The `SourceType` offers two options: - -DB:: Get the timestamp from the database. -VM:: Get the timestamp from the current JVM. - -See the <> section for more info. 
- -[[annotations-hibernate-sqldelete]] -==== `@SQLDelete` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SQLDelete.html[`@SQLDelete`] annotation is used to specify a custom SQL `DELETE` statement for the currently annotated entity or collection. - -See the <> section for more info. - -[[annotations-hibernate-sqldeleteall]] -==== `@SQLDeleteAll` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SQLDeleteAll.html[`@SQLDeleteAll`] annotation is used to specify a custom SQL `DELETE` statement when removing all elements of the currently annotated collection. - -See the <> section for more info. - -[[annotations-hibernate-sqlfragmentalias]] -==== `@SqlFragmentAlias` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SqlFragmentAlias.html[`@SqlFragmentAlias`] annotation is used to specify an alias for a Hibernate <>. - -The alias (e.g. `myAlias`) can then be used in the `@Filter` `condition` clause using the `{alias}` (e.g. `{myAlias}`) placeholder. - -See the <> section for more info. - -[[annotations-hibernate-sqlinsert]] -==== `@SQLInsert` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SQLInsert.html[`@SQLInsert`] annotation is used to specify a custom SQL `INSERT` statement for the currently annotated entity or collection. - -See the <> section for more info. - -[[annotations-hibernate-sqlupdate]] -==== `@SQLUpdate` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/SQLUpdate.html[`@SQLUpdate`] annotation is used to specify a custom SQL `UPDATE` statement for the currently annotated entity or collection. - -See the <> section for more info. 
- -[[annotations-hibernate-subselect]] -==== `@Subselect` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Subselect.html[`@Subselect`] annotation is used to specify an immutable and read-only entity using a custom SQL `SELECT` statement. - -See the <> section for more info. - -[[annotations-hibernate-synchronize]] -==== `@Synchronize` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Synchronize.html[`@Synchronize`] annotation is usually used in conjunction with the <> annotation to specify the list of database tables used by the `@Subselect` SQL query. - -With this information in place, Hibernate will properly trigger an entity flush whenever a query targeting the `@Subselect` entity is to be executed while the Persistence Context has scheduled some insert/update/delete actions against the database tables used by the `@Subselect` SQL query. - -Therefore, the `@Synchronize` annotation prevents the derived entity from returning stale data when executing entity queries against the `@Subselect` entity. - -See the <> section for more info. - -[[annotations-hibernate-table]] -==== `@Table` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Table.html[`@Table`] annotation is used to specify additional information to a Jakarta Persistence <> annotation, like custom `INSERT`, `UPDATE` or `DELETE` statements or a specific https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/FetchMode.html[`FetchMode`]. - -See the <> section for more info about Hibernate-specific `@Table` mapping. - -[[annotations-hibernate-tables]] -==== `@Tables` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Tables.html[`@Tables`] annotation is used to group multiple <> annotations. 
- -[[annotations-hibernate-target]] -==== `@Target` -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Target.html[`@Target`] annotation is used to specify an explicit target implementation when the currently annotated association is using an interface type. - -See the <> section for more info. - -[[annotations-hibernate-tuplizer]] -==== `@Tuplizer` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Tuplizer.html[`@Tuplizer`] annotation is used to specify a custom tuplizer for the currently annotated entity or embeddable. - -For entities, the tuplizer must implement the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tuple/entity/EntityTuplizer.html[`EntityTuplizer`] interface. - -For embeddables, the tuplizer must implement the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tuple/component/ComponentTuplizer.html[`ComponentTuplizer`] interface. - -See the <> section for more info. - -[[annotations-hibernate-tuplizers]] -==== `@Tuplizers` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Tuplizers.html[`@Tuplizers`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-type]] -==== `@Type` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Type.html[`@Type`] annotation is used to specify the Hibernate https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/type/Type.html[`@Type`] used by the currently annotated basic attribute. - -See the <> section for more info. 
- -[[annotations-hibernate-typedef]] -==== `@TypeDef` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/TypeDef.html[`@TypeDef`] annotation is used to specify a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/type/Type.html[`@Type`] definition which can later be reused for multiple basic attribute mappings. - -See the <> section for more info. - -[[annotations-hibernate-typedefs]] -==== `@TypeDefs` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/TypeDefs.html[`@TypeDefs`] annotation is used to group multiple <> annotations. - -[[annotations-hibernate-updatetimestamp]] -==== `@UpdateTimestamp` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/UpdateTimestamp.html[`@UpdateTimestamp`] annotation is used to specify that the currently annotated timestamp attribute should be updated with the current JVM timestamp whenever the owning entity gets modified. - -The supported property types are: - -- `java.util.Date` -- `java.util.Calendar` -- `java.sql.Date` -- `java.sql.Time` -- `java.sql.Timestamp` -- `java.time.Instant` -- `java.time.LocalDate` -- `java.time.LocalDateTime` -- `java.time.LocalTime` -- `java.time.MonthDay` -- `java.time.OffsetDateTime` -- `java.time.OffsetTime` -- `java.time.Year` -- `java.time.YearMonth` -- `java.time.ZonedDateTime` - -See the <> section for more info. - -[[annotations-hibernate-valuegenerationtype]] -==== `@ValueGenerationType` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/ValueGenerationType.html[`@ValueGenerationType`] annotation is used to specify that the current annotation type should be used as a generator annotation type. - -See the <> section for more info. 
- -[[annotations-hibernate-where]] -==== `@Where` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/Where.html[`@Where`] annotation is used to specify a custom SQL `WHERE` clause used when fetching an entity or a collection. - -See the <> section for more info. - -[[annotations-hibernate-wherejointable]] -==== `@WhereJoinTable` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/WhereJoinTable.html[`@WhereJoinTable`] annotation is used to specify a custom SQL `WHERE` clause used when fetching a join collection table. - -See the <> section for more info. - - -[[annotations-hibernate-tenantid]] -==== `@TenantId` - -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/TenantId.html[`@TenantId`] annotation identifies a field of an entity that holds a tenant id in discriminator-based multitenancy. diff --git a/documentation/src/main/asciidoc/userguide/appendices/BestPractices.adoc b/documentation/src/main/asciidoc/userguide/appendices/BestPractices.adoc index 6552ebad79fd..d64170c7230d 100644 --- a/documentation/src/main/asciidoc/userguide/appendices/BestPractices.adoc +++ b/documentation/src/main/asciidoc/userguide/appendices/BestPractices.adoc @@ -258,8 +258,3 @@ For this reason, `NONSTRICT_READ_WRITE` is not very suitable if entities are cha When using clustering, the second-level cache entries are spread across multiple nodes. When using https://infinispan.org/blog/2015/10/01/hibernate-second-level-cache/[Infinispan distributed cache], only `READ_WRITE` and `NONSTRICT_READ_WRITE` are available for read-write caches. Bear in mind that `NONSTRICT_READ_WRITE` offers a weaker consistency guarantee since stale updates are possible. - -[NOTE] -==== -For more about Hibernate Performance Tuning, check out the https://www.youtube.com/watch?v=BTdTEe9QL5k&t=1s[High-Performance Hibernate] presentation from Devoxx France. 
-==== \ No newline at end of file diff --git a/documentation/src/main/asciidoc/userguide/appendices/Configurations.adoc b/documentation/src/main/asciidoc/userguide/appendices/Configurations.adoc deleted file mode 100644 index f3e9f8bfbd68..000000000000 --- a/documentation/src/main/asciidoc/userguide/appendices/Configurations.adoc +++ /dev/null @@ -1,1179 +0,0 @@ -[[configurations]] -== Configurations - -[[configurations-strategy]] -=== Strategy configurations - -Many configuration settings define pluggable strategies that Hibernate uses for various purposes. -The configurations of many of these strategy type settings accept definition in various forms. -The documentation of such configuration settings refers here. -The types of forms available in such cases include: - -short name (if defined):: - Certain built-in strategy implementations have a corresponding short name -strategy instance:: - An instance of the strategy implementation to use can be specified -strategy Class reference:: - A `java.lang.Class` reference of the strategy implementation to use -strategy Class name:: - The class name (`java.lang.String`) of the strategy implementation to use - -[[configurations-general]] -=== General configuration - -`*hibernate.dialect*` (e.g. `org.hibernate.dialect.PostgreSQLDialect`):: -The class name of a Hibernate https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`] from which Hibernate can generate SQL optimized for a particular relational database. -+ -In most cases, Hibernate can choose the correct https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html[`Dialect`] implementation based on the JDBC metadata returned by the JDBC driver. -+ -`*hibernate.current_session_context_class*` (e.g. 
`jta`, `thread`, `managed`, or a custom class implementing `org.hibernate.context.spi.CurrentSessionContext`):: -+ -Supplies a custom strategy for the scoping of the _current_ `Session`. -+ -The definition of what exactly _current_ means is controlled by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentSessionContext.html[`CurrentSessionContext`] implementation in use. -+ -Note that for backward compatibility, if a `CurrentSessionContext` is not configured but JTA is configured this will default to the `JTASessionContext`. - -[[configurations-jpa-compliance]] -=== Jakarta Persistence compliance - -`*hibernate.jpa.compliance.transaction*` (e.g. `true` or `false` (default value)):: -This setting controls if Hibernate `Transaction` should behave as defined by the spec for Jakarta Persistence's `jakarta.persistence.EntityTransaction` -since it extends the Jakarta Persistence one. - -`*hibernate.jpa.compliance.query*` (e.g. `true` or `false` (default value)):: -Controls whether Hibernate's handling of `jakarta.persistence.Query` (JPQL, Criteria and native query) should strictly follow the Jakarta Persistence spec. -+ -This includes both in terms of parsing or translating a query as well as calls to the `jakarta.persistence.Query` methods throwing spec -defined exceptions whereas Hibernate might not. - -`*hibernate.jpa.compliance.list*` (e.g. `true` or `false` (default value)):: -Controls whether Hibernate should recognize what it considers a "bag" (`org.hibernate.collection.internal.PersistentBag`) -as a List (`org.hibernate.collection.internal.PersistentList`) or as a bag. -+ -If enabled, we will recognize it as a List where `jakarta.persistence.OrderColumn` -is just missing (and its defaults will apply). - -`*hibernate.jpa.compliance.closed*` (e.g. 
`true` or `false` (default value)):: -Jakarta Persistence defines specific exceptions upon calling specific methods on `jakarta.persistence.EntityManager` and `jakarta.persistence.EntityManagerFactory` -objects which have been closed previously. -+ -This setting controls whether the Jakarta Persistence spec-defined behavior or the Hibernate behavior will be used. -+ -If enabled, Hibernate will operate in the Jakarta Persistence specified way, throwing exceptions when the spec says it should. - -`*hibernate.jpa.compliance.proxy*` (e.g. `true` or `false` (default value)):: -The Jakarta Persistence spec says that a `jakarta.persistence.EntityNotFoundException` should be thrown when accessing an entity proxy -which does not have an associated table row in the database. -+ -Traditionally, Hibernate does not initialize an entity proxy when accessing its identifier since we already know the identifier value, -hence we can save a database roundtrip. -+ -If enabled Hibernate will initialize the entity proxy even when accessing its identifier. - -`*hibernate.jpa.compliance.global_id_generators*` (e.g. `true` or `false` (default value) ):: -The Jakarta Persistence spec says that the scope of TableGenerator and SequenceGenerator names is global to the persistence unit (across all generator types). -+ -Traditionally, Hibernate has considered the names locally scoped. -+ -If enabled, the names used by `@TableGenerator` and `@SequenceGenerator` will be considered global so configuring two different generators -with the same name will cause a `java.lang.IllegalArgumentException` to be thrown at boot time. - -[[configurations-database-connection]] -=== Database connection properties - -`*hibernate.connection.driver_class*` or `*jakarta.persistence.jdbc.driver*` (e.g. `org.postgresql.Driver`):: -Names the JDBC `Driver` class name. - -`*hibernate.connection.url*` or `*jakarta.persistence.jdbc.url*` (e.g. `jdbc:postgresql:hibernate_orm_test`):: -Names the JDBC connection URL. 
- -`*hibernate.connection.username*` or `*jakarta.persistence.jdbc.user*`:: -Names the JDBC connection user name. - -`*hibernate.connection.password*` or `*jakarta.persistence.jdbc.password*`:: -Names the JDBC connection password. - -`*hibernate.connection.isolation*` (e.g. `REPEATABLE_READ` or `Connection.TRANSACTION_REPEATABLE_READ`):: -Names the JDBC connection transaction isolation level. - -`*hibernate.connection.autocommit*` (e.g. `true` or `false` (default value)):: -Names the initial autocommit mode for JDBC Connections returned from a connection pool created in certain ConnectionProvider impl. -+ -See discussion of `hibernate.connection.provider_disables_autocommit` as well. - -`*hibernate.connection.provider_disables_autocommit*` (e.g. `true` or `false` (default value)):: -Indicates a promise by the user that Connections that Hibernate obtains from the configured ConnectionProvider -have auto-commit disabled when they are obtained from that provider, whether that provider is backed by -a DataSource or some other Connection pooling mechanism. Generally, this occurs when: -* Hibernate is configured to get Connections from an underlying DataSource, and that DataSource is already configured to disable auto-commit on its managed Connections. -* Hibernate is configured to get Connections from a non-DataSource connection pool and that connection pool is already configured to disable auto-commit. -For the Hibernate provided implementation this will depend on the value of `hibernate.connection.autocommit` setting. -+ -Hibernate uses this assurance as an opportunity to opt out of certain operations that may have a performance -impact (although this impact is generally negligible). 
Specifically, when a transaction is started via the -Hibernate or Jakarta Persistence transaction APIs Hibernate will generally immediately acquire a Connection from the -provider and: -* check whether the Connection is initially in auto-commit mode via a call to `Connection#getAutocommit` to know how to clean up the Connection when released. -* start a JDBC transaction by calling `Connection#setAutocommit(false)`. -+ -We can skip both of those steps if we know that the ConnectionProvider will always return Connections with auto-commit disabled. -That is the purpose of this setting. By setting it to `true`, the `Connection` acquisition can be delayed until the first -SQL statement is needed to be executed. The connection acquisition delay allows you to reduce the database connection lease -time, therefore allowing you to increase the transaction throughput. -+ -==== -It is *inappropriate* to set this value to `true` when the Connections Hibernate gets -from the provider do not, in fact, have auto-commit disabled. - -Doing so will lead to Hibernate executing SQL operations outside of any JDBC/SQL transaction. -==== - -`*hibernate.connection.handling_mode*`:: -Specifies how Hibernate should manage JDBC connections in terms of acquiring and releasing. -+ -The connection handling mode strategies are defined by the -https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/jdbc/spi/PhysicalConnectionHandlingMode.html[`PhysicalConnectionHandlingMode`] enumeration. -+ -The configuration can be either a `PhysicalConnectionHandlingMode` reference or its case-insensitive `String` representation. -+ -For more details about the `PhysicalConnectionHandlingMode` and Hibernate connection handling, check out the -<> section. - -`*hibernate.connection.datasource*`:: -Either a `javax.sql.DataSource` instance or a JNDI name under which to locate the `DataSource`. 
-+ -For JNDI names, ses also `hibernate.jndi.class`, `hibernate.jndi.url`, `hibernate.jndi`. - -`*hibernate.connection*`:: - Names a prefix used to define arbitrary JDBC connection properties. These properties are passed along to the JDBC provider when creating a connection. -`*hibernate.connection.provider_class*` (e.g. `org.hibernate.hikaricp.internal. HikariCPConnectionProvider`):: -Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/connections/spi/ConnectionProvider.html[`ConnectionProvider`] to use for obtaining JDBC connections. -+ -Can reference: -+ -** an instance of `ConnectionProvider` -** a `Class` object reference -** a fully qualified name of a class implementing `ConnectionProvider` -+ - -The term `class` appears in the setting name due to legacy reasons. However, it can accept instances. - -`*hibernate.jndi.class*`:: -Names the JNDI `javax.naming.InitialContext` class. - -`*hibernate.jndi.url*` (e.g. `java:global/jdbc/default`):: -Names the JNDI provider/connection url. - -`*hibernate.jndi*`:: -Names a prefix used to define arbitrary JNDI `javax.naming.InitialContext` properties. -+ -These properties are passed along to `javax.naming.InitialContext#InitialContext(java.util.Hashtable)` method. - -==== Hibernate internal connection pool options - -`*hibernate.connection.initial_pool_size*` (e.g. 1 (default value)):: -Minimum number of connections for the built-in Hibernate connection pool. - -`*hibernate.connection.pool_size*` (e.g. 20 (default value)):: -Maximum number of connections for the built-in Hibernate connection pool. - -`*hibernate.connection.pool_validation_interval*` (e.g. 30 (default value)):: -The number of seconds between two consecutive pool validations. During validation, the pool size can increase or decrease based on the connection acquisition request count. - -[[configurations-c3p0]] -=== c3p0 properties - -`*hibernate.c3p0.min_size*` (e.g. 
1):: - Minimum size of C3P0 connection pool. Refers to https://www.mchange.com/projects/c3p0/#minPoolSize[c3p0 `minPoolSize` setting]. - -`*hibernate.c3p0.max_size*` (e.g. 5):: - Maximum size of C3P0 connection pool. Refers to https://www.mchange.com/projects/c3p0/#maxPoolSize[c3p0 `maxPoolSize` setting]. - -`*hibernate.c3p0.timeout*` (e.g. 30):: - Maximum idle time for C3P0 connection pool. Refers to https://www.mchange.com/projects/c3p0/#maxIdleTime[c3p0 `maxIdleTime` setting]. - -`*hibernate.c3p0.max_statements*` (e.g. 5):: - Maximum size of C3P0 statement cache. Refers to https://www.mchange.com/projects/c3p0/#maxStatements[c3p0 `maxStatements` setting]. - -`*hibernate.c3p0.acquire_increment*` (e.g. 2):: - The number of connections acquired at a time when there's no connection available in the pool. Refers to https://www.mchange.com/projects/c3p0/#acquireIncrement[c3p0 `acquireIncrement` setting]. - -`*hibernate.c3p0.idle_test_period*` (e.g. 5):: - Idle time before a C3P0 pooled connection is validated. Refers to https://www.mchange.com/projects/c3p0/#idleConnectionTestPeriod[c3p0 `idleConnectionTestPeriod` setting]. - -`*hibernate.c3p0*`:: - A setting prefix used to indicate additional c3p0 properties that need to be passed to the underlying c3p0 connection pool. - -[[configurations-mapping]] -=== Mapping Properties - -==== Table qualifying options - -`*hibernate.default_catalog*` (e.g. A catalog name):: -Qualifies unqualified table names with the given catalog in generated SQL. - -`*hibernate.default_schema*` (e.g. A schema name):: -Qualify unqualified table names with the given schema or tablespace in generated SQL. - -`*hibernate.schema_name_resolver*` (e.g. 
The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/env/spi/SchemaNameResolver.html[`org.hibernate.engine.jdbc.env.spi.SchemaNameResolver`] implementation class):: -By default, Hibernate uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html#getSchemaNameResolver--[`org.hibernate.dialect.Dialect#getSchemaNameResolver`]. -You can customize how the schema name is resolved by providing a custom implementation of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/env/spi/SchemaNameResolver.html[`SchemaNameResolver`] interface. - -==== Identifier options - -`*hibernate.use_identifier_rollback*` (e.g. `true` or `false` (default value)):: -If true, generated identifier properties are reset to default values when objects are deleted. - -`*hibernate.id.optimizer.pooled.preferred*` (e.g. `none`, `hilo`, `legacy-hilo`, `pooled` (default value), `pooled-lo`, `pooled-lotl` or a fully-qualified name of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/id/enhanced/Optimizer.html[`Optimizer`] implementation):: -When a generator specified an increment-size and an optimizer was not explicitly specified, which of the _pooled_ optimizers should be preferred? - -`*hibernate.id.generator.stored_last_used*` (e.g. `true` (default value) or `false`):: -If true, the value stored in the table used by the `@TableGenerator` is the last value used, if false the value is the next value to be used. - -`*hibernate.model.generator_name_as_sequence_name*` (e.g. `true` (default value) or `false`):: -If true, the value specified by the `generator` attribute of the `@GeneratedValue` annotation should be used as the sequence/table name when no matching -`@SequenceGenerator` or `TableGenerator` is found. 
-+ -The default value is `true` meaning that `@GeneratedValue.generator()` will be used as the sequence/table name by default. -Users migrating from earlier versions using the legacy `hibernate_sequence` name should disable this setting. - -`*hibernate.ejb.identifier_generator_strategy_provider*` (e.g. fully-qualified class name or an actual https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/spi/IdentifierGeneratorStrategyProvider.html[`IdentifierGeneratorStrategyProvider`] instance):: -This setting allows you to provide an instance or the class implementing the `org.hibernate.jpa.spi.IdentifierGeneratorStrategyProvider` interface, -so you can provide a set of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/id/IdentifierGenerator.html[`IdentifierGenerator`] strategies allowing to override the Hibernate Core default ones. - -`*hibernate.id.disable_delayed_identity_inserts*` (e.g. `true` or `false` (default value)):: -If true, inserts that use generated-identifiers (identity/sequences) will never be delayed and will always be inserted immediately. -This should be used if you run into any errors with `DelayedPostInsertIdentifier` and should be considered a _temporary_ fix. -Please report your mapping that causes the problem to us so we can examine the default algorithm to see if your use case should be included. -+ -The default value is `false` which means Hibernate will use an algorithm to determine if the insert can be delayed or if the insert should be performed immediately. - -`*hibernate.id.sequence.increment_size_mismatch_strategy*` (e.g. `LOG`, `FIX`, `NONE` or `EXCEPTION` (default value)):: -This setting defines the `org.hibernate.id.SequenceMismatchStrategy` used when -Hibernate detects a mismatch between a sequence configuration in an entity mapping -and its database sequence object counterpart. 
-+ -The default value is given by the `org.hibernate.id.SequenceMismatchStrategy#EXCEPTION`, -meaning that an Exception is thrown when detecting such a conflict. - -==== Quoting options - -`*hibernate.globally_quoted_identifiers*` (e.g. `true` or `false` (default value)):: -Should all database identifiers be quoted. - -`*hibernate.globally_quoted_identifiers_skip_column_definitions*` (e.g. `true` or `false` (default value)):: -Assuming `hibernate.globally_quoted_identifiers` is `true`, this allows the global quoting to skip column-definitions as defined by `jakarta.persistence.Column`, -`jakarta.persistence.JoinColumn`, etc., and while it avoids column-definitions being quoted due to global quoting, they can still be explicitly quoted in the annotation/xml mappings. - -`*hibernate.auto_quote_keyword*` (e.g. `true` or `false` (default value)):: -Specifies whether to automatically quote any names that are deemed keywords. - -==== Time zone storage -`*hibernate.timezone.default_storage*` (e.g. `COLUMN`, `NATIVE`, `AUTO` or `NORMALIZE` (default value)):: -Global setting for configuring the default storage for the time zone information for time zone based types. -+ -`NORMALIZE`::: Does not store the time zone information, and instead normalizes timestamps to UTC -`COLUMN`::: Stores the time zone information in a separate column; works in conjunction with `@TimeZoneColumn` -`NATIVE`::: Stores the time zone information by using the `with time zone` type. Error if `Dialect#getTimeZoneSupport()` is not `NATIVE` -`AUTO`::: Stores the time zone information either with `NATIVE` if `Dialect#getTimeZoneSupport()` is `NATIVE`, otherwise uses the `COLUMN` strategy. -+ -The default value is given by the {@link org.hibernate.annotations.TimeZoneStorageType#NORMALIZE}, -meaning that time zone information is not stored by default, but timestamps are normalized instead. 
-+ -See the discussion https://github.com/hibernate/hibernate-orm/discussions/4201[on GitHub] for additional background info. - -==== Discriminator options -`*hibernate.discriminator.implicit_for_joined*` (e.g. `true` or `false` (default value)):: -The legacy behavior of Hibernate is to not use discriminators for joined inheritance (Hibernate does not need the discriminator). -However, some Jakarta Persistence providers do need the discriminator for handling joined inheritance so, in the interest of portability, this capability has been added to Hibernate too. -+ -Because want to make sure that legacy applications continue to work as well, that puts us in a bind in terms of how to handle _implicit_ discriminator mappings. -The solution is to assume that the absence of discriminator metadata means to follow the legacy behavior _unless_ this setting is enabled. -+ -With this setting enabled, Hibernate will interpret the absence of discriminator metadata as an indication to use the Jakarta Persistence-defined defaults for these absent annotations. -+ -See Hibernate Jira issue https://hibernate.atlassian.net/browse/HHH-6911[HHH-6911] for additional background info. - -`*hibernate.discriminator.ignore_explicit_for_joined*` (e.g. `true` or `false` (default value)):: -The legacy behavior of Hibernate is to not use discriminators for joined inheritance (Hibernate does not need the discriminator). -However, some Jakarta Persistence providers do need the discriminator for handling joined inheritance so, in the interest of portability, this capability has been added to Hibernate too. -+ -Existing applications rely (implicitly or explicitly) on Hibernate ignoring any `DiscriminatorColumn` declarations on joined inheritance hierarchies. -This setting allows these applications to maintain the legacy behavior of `DiscriminatorColumn` annotations being ignored when paired with joined inheritance. 
-+ -See Hibernate Jira issue https://hibernate.atlassian.net/browse/HHH-6911[HHH-6911] for additional background info. - -==== Naming strategies - -`*hibernate.implicit_naming_strategy*` (e.g. `default` (default value), `jpa`, `legacy-jpa`, `legacy-hbm`, `component-path`):: -Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategy.html[`ImplicitNamingStrategy`] class to use. -The following short names are defined for this setting: -`default`::: Uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategyJpaCompliantImpl.html[`ImplicitNamingStrategyJpaCompliantImpl`] -`jpa`::: Uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategyJpaCompliantImpl.html[`ImplicitNamingStrategyJpaCompliantImpl`] -`legacy-jpa`::: Uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategyLegacyJpaImpl.html[`ImplicitNamingStrategyLegacyJpaImpl`] -`legacy-hbm`::: Uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategyLegacyHbmImpl.html[`ImplicitNamingStrategyLegacyHbmImpl`] -`component-path`::: Uses the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/ImplicitNamingStrategyComponentPathImpl.html[`ImplicitNamingStrategyComponentPathImpl`] -+ -If this property happens to be empty, the fallback is to use the `default` strategy. - -`*hibernate.physical_naming_strategy*` (e.g. `org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl` (default value)):: -Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/naming/PhysicalNamingStrategy.html[`PhysicalNamingStrategy`] class to use. 
- -==== Metadata scanning options - -`*hibernate.archive.scanner*`:: -Pass an implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/spi/Scanner.html[`Scanner`]. -By default, https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/internal/StandardScanner.html[`StandardScanner`] is used. -+ -Accepts: -+ -** an actual `Scanner` instance -** a reference to a Class that implements `Scanner` -** a fully qualified name of a Class that implements `Scanner` - -`*hibernate.archive.interpreter*`:: -Pass https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/spi/ArchiveDescriptorFactory.html[`ArchiveDescriptorFactory`] to use in the scanning process. -+ -Accepts: -+ -** an actual `ArchiveDescriptorFactory` instance -** a reference to a Class that implements `ArchiveDescriptorFactory` -** a fully qualified name of a Class that implements `ArchiveDescriptorFactory` -+ - -See information on https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/archive/scan/spi/Scanner.html[`Scanner`] about expected constructor forms. - -`*hibernate.archive.autodetection*` (e.g. `hbm,class` (default value)):: -Identifies a comma-separated list of values indicating the mapping types we should auto-detect during scanning. -+ -Allowable values include: -+ -`class`::: scan classes (e.g. `.class`) to extract entity mapping metadata -`hbm`::: scan `hbm` mapping files (e.g. `hbm.xml`) to extract entity mapping metadata -+ - -By default HBM, annotations, and Jakarta Persistence XML mappings are scanned. -+ -When using Jakarta Persistence, to disable the automatic scanning of all entity classes, the `exclude-unlisted-classes` `persistence.xml` element must be set to true. 
-Therefore, when setting `exclude-unlisted-classes` to true, only the classes that are explicitly declared in the `persistence.xml` configuration files are going to be taken into consideration. - -`*hibernate.mapping.precedence*` (e.g. `hbm,class` (default value)):: -Used to specify the order in which metadata sources should be processed. -Value is a delimited-list whose elements are defined by https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cfg/MetadataSourceType.html[`MetadataSourceType`]. -+ -The default is `hbm,class`, therefore `hbm.xml` files are processed first, followed by annotations (combined with `orm.xml` mappings). -+ -When using Jakarta Persistence, the XML mapping overrides a conflicting annotation mapping that targets the same entity attribute. - -==== JDBC-related options - -`*hibernate.use_nationalized_character_data*` (e.g. `true` or `false` (default value)):: -Enable nationalized character support on all string / clob based attribute ( string, char, clob, text, etc. ). - -`*hibernate.jdbc.lob.non_contextual_creation*` (e.g. `true` or `false` (default value)):: -Should we not use contextual LOB creation (aka based on `java.sql.Connection#createBlob()` et al)? The default value for HANA, H2, and PostgreSQL is `true`. - -`*hibernate.jdbc.time_zone*` (e.g. A `java.util.TimeZone`, a `java.time.ZoneId` or a `String` representation of a `ZoneId`):: -Unless specified, the JDBC Driver uses the default JVM time zone. If a different time zone is configured via this setting, the JDBC https://docs.oracle.com/javase/8/docs/api/java/sql/PreparedStatement.html#setTimestamp-int-java.sql.Timestamp-java.util.Calendar-[PreparedStatement#setTimestamp] is going to use a `Calendar` instance according to the specified time zone. - -`*hibernate.dialect.oracle.prefer_long_raw*` (e.g. 
`true` or `false` (default value)):: -This setting applies to Oracle Dialect only, and it specifies whether `byte[]` or `Byte[]` arrays should be mapped to the deprecated `LONG RAW` (when this configuration property value is `true`) or to a `BLOB` column type (when this configuration property value is `false`). - -`*hibernate.type.preferred_boolean_jdbc_type*` (e.g. `-7` for `java.sql.Types.BIT`):: -Global setting identifying the preferred JDBC type code for storing boolean values. The fallback is to ask the Dialect. -Can also specify the name of the constant in `org.hibernate.type.SqlTypes` instead. - -`*hibernate.type.preferred_uuid_jdbc_type*` (e.g. `1` for `java.sql.Types.CHAR` or `3000` for `org.hibernate.types.SqlTypes.UUID` (default value)):: -Global setting identifying the preferred JDBC type code for storing uuid values. -Can also specify the name of the constant in `org.hibernate.type.SqlTypes` instead. - -`*hibernate.type.preferred_duration_jdbc_type*` (e.g. `2` for `java.sql.Types.NUMERIC` or `3100` for `org.hibernate.types.SqlTypes.INTERVAL_SECOND` (default value)):: -Global setting identifying the preferred JDBC type code for storing duration values. -Can also specify the name of the constant in `org.hibernate.type.SqlTypes` instead. - -`*hibernate.type.preferred_instant_jdbc_type*` (e.g. `93` for `java.sql.Types.TIMESTAMP` or `3003` for `org.hibernate.types.SqlTypes.TIMESTAMP_UTC` (default value)):: -Global setting identifying the preferred JDBC type code for storing instant values. -Can also specify the name of the constant in `org.hibernate.type.SqlTypes` instead. - -==== Bean Validation options -`*jakarta.persistence.validation.factory*` (e.g. `jakarta.validation.ValidationFactory` implementation):: -Specify the `javax.validation.ValidationFactory` implementation to use for Bean Validation. - -`*hibernate.check_nullability*` (e.g. `true` or `false`):: -Enable nullability checking. Raises an exception if a property marked as not-null is null. 
-+ -Default to `false` if Bean Validation is present in the classpath and Hibernate Annotations is used, `true` otherwise. -`*hibernate.validator.apply_to_ddl*` (e.g. `true` (default value) or `false`):: -+ -Bean Validation constraints will be applied in DDL if the automatic schema generation is enabled. -In other words, the database schema will reflect the Bean Validation constraints. -+ -To disable constraint propagation to DDL, set up `hibernate.validator.apply_to_ddl` to `false` in the configuration file. -Such a need is very uncommon and not recommended. - -[[misc-options]] -==== Misc options - -`*hibernate.create_empty_composites.enabled*` (e.g. `true` or `false` (default value)):: - Enable instantiation of composite/embeddable objects when all of its attribute values are `null`. The default (and historical) behavior is that a `null` reference will be used to represent the composite when all of its attributes are ``null``s. -+ -This is an experimental feature that has known issues. It should not be used in production until it is stabilized. See Hibernate Jira issue https://hibernate.atlassian.net/browse/HHH-11936[HHH-11936] for details. - -`*hibernate.entity_dirtiness_strategy*` (e.g. fully-qualified class name or an actual `CustomEntityDirtinessStrategy` instance):: -Setting to identify an `org.hibernate.CustomEntityDirtinessStrategy` to use. - -`*hibernate.type.json_format_mapper*` (e.g. A fully-qualified class name, an instance, or a `Class` object reference):: -Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/type/FormatMapper.html[`FormatMapper`] implementation to be applied to the `SessionFactory` for JSON serialization and deserialization. -+ -Can reference a -`FormatMapper` instance, -`FormatMapper` implementation `Class` reference, -`FormatMapper` implementation class name (fully-qualified class name) or -one of the following shorthand constants `jackson` or `jsonb`. 
-By default, the first of the possible providers that is available in the runtime is used, according to the listing order. -+ -Note that the default serialization format of collections can differ depending on the serialization library. - -`*hibernate.type.xml_format_mapper*` (e.g. A fully-qualified class name, an instance, or a `Class` object reference):: -Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/type/FormatMapper.html[`FormatMapper`] implementation to be applied to the `SessionFactory` for XML serialization and deserialization. -+ -Can reference a -`FormatMapper` instance, -`FormatMapper` implementation `Class` reference, -`FormatMapper` implementation class name (fully-qualified class name) or -one of the following shorthand constants `jackson-xml` or `jaxb`. -By default, the first of the possible providers that is available in the runtime is used, according to the listing order. -+ -Note that the default serialization format of collections can differ depending on the serialization library. - -[[configurations-bytecode-enhancement]] -=== Bytecode Enhancement Properties - -`*hibernate.enhancer.enableDirtyTracking*` (e.g. `true` (default value) or `false`):: -Enable dirty tracking feature in runtime bytecode enhancement. This setting is deprecated for removal without a replacement. - -`*hibernate.enhancer.enableLazyInitialization*` (e.g. `true` (default value) or `false`):: -Enable lazy loading feature in runtime bytecode enhancement. This way, even basic types (e.g. `@Basic(fetch = FetchType.LAZY`)) can be fetched lazily. This setting is deprecated for removal without a replacement. - -`*hibernate.enhancer.enableAssociationManagement*` (e.g. `true` or `false` (default value)):: -Enable association management feature in runtime bytecode enhancement which automatically synchronizes a bidirectional association when only one side is changed. - -`*hibernate.bytecode.provider*` (e.g. 
`bytebuddy` (default value)):: -The https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/bytecode/spi/BytecodeProvider.html[`BytecodeProvider`] built-in implementation flavor. Currently, only `bytebuddy` is a valid value, as older deprecated options have been removed. - -`*hibernate.bytecode.use_reflection_optimizer*` (e.g. `true` (default value) or `false`):: -Should we use reflection optimization? The reflection optimizer implements the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/bytecode/spi/ReflectionOptimizer.html[`ReflectionOptimizer`] interface and improves entity instantiation and property getter/setter calls. This setting is deprecated for removal without a replacement. - -[[configurations-query]] -=== Query settings - -`*hibernate.query.plan_cache_max_size*` (e.g. `2048` (default value)):: -The maximum number of entries including: -https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/query/spi/HQLQueryPlan.html[`HQLQueryPlan`], -https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/query/spi/FilterQueryPlan.html[`FilterQueryPlan`], -https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/query/spi/NativeSQLQueryPlan.html[`NativeSQLQueryPlan`]. -+ -Maintained by https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/query/spi/QueryPlanCache.html[`QueryPlanCache`]. - -`*hibernate.query.plan_parameter_metadata_max_size*` (e.g. `128` (default value)):: -The maximum number of strong references associated with `ParameterMetadata` maintained by https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/query/spi/QueryPlanCache.html[`QueryPlanCache`]. - -`*hibernate.order_by.default_null_ordering*` (e.g. `none`, `first` or `last`):: -Defines precedence of null values in `ORDER BY` clause. 
Defaults to `none` which varies between RDBMS implementation. - -`*hibernate.discriminator.force_in_select*` (e.g. `true` or `false` (default value)):: -For entities which do not explicitly say, should we force discriminators into SQL selects? - -`*hibernate.query.jpaql_strict_compliance*` (e.g. `true` or `false` (default value)):: -Map from tokens in Hibernate queries to SQL tokens, such as function or literal names. -+ -Should we strictly adhere to Jakarta Persistence Query Language (JPQL) syntax, or more broadly support all of Hibernate's superset (HQL)? -+ -Setting this to `true` may cause valid HQL to throw an exception because it violates the JPQL subset. - -`*hibernate.query.startup_check*` (e.g. `true` (default value) or `false`):: -Should named queries be checked during startup? - -`*hibernate.proc.param_null_passing*` (e.g. `true` or `false` (default value)):: -Global setting for whether `null` parameter bindings should be passed to database procedure/function calls as part of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/procedure/ProcedureCall.html[`ProcedureCall`] handling. -Implicitly Hibernate will not pass the `null`, the intention being to allow any default argument values to be applied. -+ -This defines a global setting, which can then be controlled per parameter via `org.hibernate.procedure.ParameterRegistration#enablePassingNulls(boolean)`. -+ -Values are `true` (pass the NULLs) or `false` (do not pass the NULLs). - -`*hibernate.jdbc.log.warnings*` (e.g. `true` or `false`):: -Enable fetching JDBC statement warning for logging. Default value is given by `org.hibernate.dialect.Dialect#isJdbcLogWarningsEnabledByDefault()`. - -`*hibernate.session_factory.statement_inspector*` (e.g. 
A fully-qualified class name, an instance, or a `Class` object reference):: -Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/jdbc/spi/StatementInspector.html[`StatementInspector`] implementation to be applied to every `Session` created by the current `SessionFactory`. -+ -Can reference a -`StatementInspector` instance, -`StatementInspector` implementation `Class` reference or -`StatementInspector` implementation class name (fully-qualified class name). - -`*hibernate.criteria.value_handling_mode*` (e.g. `BIND` (default value) or `INLINE`):: -By default, Criteria queries uses bind parameters for any value passed through the Jakarta Persistence Criteria API. -+ -The {@link org.hibernate.query.criteria.ValueHandlingMode#BIND} mode (default) will use bind variables for any value. -+ -The {@link org.hibernate.query.criteria.ValueHandlingMode#INLINE} mode will inline values as literals. -+ -The default value is {@link org.hibernate.query.criteria.ValueHandlingMode#BIND}. -Valid options are defined by the `org.hibernate.query.criteria.ValueHandlingMode` enum. - -`*hibernate.criteria.copy_tree*` (e.g. `true` or `false` (default value) ):: -The Jakarta Persistence spec says that mutations done to `CriteriaQuery`, `CriteriaUpdate` and `CriteriaDelete` -after such objects were used to create a `jakarta.persistence.Query` may not affect that query. -This requirement makes it necessary to copy these objects because the APIs allow mutations. -+ -If disabled, it is assumed that users do not mutate the criteria query afterwards -and due to that, no copy will be created, which will improve performance. -+ -When bootstrapping Hibernate through the native bootstrap APIs this setting is disabled -i.e. no copies are created to not hurt performance. -When bootstrapping Hibernate through the JPA SPI this setting is enabled. -When enabled, criteria query objects are copied, as required by the Jakarta Persistence specification. 
- -`*hibernate.query.fail_on_pagination_over_collection_fetch*` (e.g. `true` or `false` (default value)):: -Raises an exception when in-memory pagination over collection fetch is about to be performed. -+ -Disabled by default. Set to true to enable. - -`*hibernate.query.immutable_entity_update_query_handling_mode*` (e.g. `EXCEPTION` or `WARNING` (default value)):: -Defines how `Immutable` entities are handled when executing a bulk update query. -+ -By default, the (`ImmutableEntityUpdateQueryHandlingMode#WARNING`) mode is used, meaning that -a warning log message is issued when an `@Immutable` entity is to be updated via a bulk update statement. -+ -If the (`ImmutableEntityUpdateQueryHandlingMode#EXCEPTION`) mode is used, then a `HibernateException` is thrown instead. - -`*hibernate.query.in_clause_parameter_padding*` (e.g. `true` or `false` (default value)):: -By default, the IN clause expands to include all bind parameter values. -+ -However, for database systems supporting execution plan caching, -there's a better chance of hitting the cache if the number of possible IN clause parameters lowers. -+ -For this reason, we can expand the bind parameters to power-of-two: 4, 8, 16, 32, 64. -This way, an IN clause with 5, 6, or 7 bind parameters will use the 8 IN clause, -therefore reusing its execution plan. - -==== Multi-table bulk HQL operations - -`*hibernate.query.mutation_strategy*` (e.g. A fully-qualified class name, an instance, or a `Class` object reference):: -Provide a custom https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/query/sqm/mutation/spi/SqmMultiTableMutationStrategy.html[`org.hibernate.query.sqm.mutation.spi.SqmMultiTableMutationStrategy`] implementation for handling multi-table bulk HQL operations. - -`*hibernate.hql.bulk_id_strategy.global_temporary.create_tables*` (e.g. 
`true` (default value) or `false`):: -For databases that don't support local tables, but just global ones, this configuration property allows you to control whether to CREATE the global tables used for multi-table bulk HQL operations at `SessionFactory` or the `EntityManagerFactory` startup. - -`*hibernate.hql.bulk_id_strategy.global_temporary.drop_tables*` (e.g. `true` or `false` (default value)):: - For databases that don't support local tables, but just global ones, this configuration property allows you to DROP the global tables used for multi-table bulk HQL operations when the `SessionFactory` or the `EntityManagerFactory` is closed. - -`*hibernate.hql.bulk_id_strategy.local_temporary.drop_tables*` (e.g. `true` or `false` (default value)):: -This configuration property allows you to DROP the local temporary tables used for multi-table bulk HQL operations when the `SessionFactory` or `EntityManagerFactory` is closed. This is useful when testing with a single connection pool against different schemas. - -`*hibernate.hql.bulk_id_strategy.persistent.create_tables*` (e.g. `true` (default value) or `false`):: -This configuration property is used by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/hql/spi/id/persistent/PersistentTableBulkIdStrategy.html[`PersistentTableBulkIdStrategy`], that mimics temporary tables for databases which do not support temporary tables. -It follows a pattern similar to the ANSI SQL definition of the global temporary table using a "session id" column to segment rows from the various sessions. -+ -This configuration property allows you to control whether to CREATE the tables used for multi-table bulk HQL operations at `SessionFactory` or `EntityManagerFactory` startup. - -`*hibernate.hql.bulk_id_strategy.persistent.drop_tables*` (e.g. 
`true` or `false` (default value)):: -This configuration property is used by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/hql/spi/id/persistent/PersistentTableBulkIdStrategy.html[`PersistentTableBulkIdStrategy`], that mimics temporary tables for databases which do not support temporary tables. -It follows a pattern similar to the ANSI SQL definition of the global temporary table using a "session id" column to segment rows from the various sessions. -+ -This configuration property allows you to DROP the tables used for multi-table bulk HQL operations when the `SessionFactory` or the `EntityManagerFactory` is closed. - -`*hibernate.hql.bulk_id_strategy.persistent.schema*` (e.g. Database schema name. By default, the `hibernate.default_schema` is used.):: -This configuration property is used by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/hql/spi/id/persistent/PersistentTableBulkIdStrategy.html[`PersistentTableBulkIdStrategy`], that mimics temporary tables for databases which do not support temporary tables. -It follows a pattern similar to the ANSI SQL definition of the global temporary table using a "session id" column to segment rows from the various sessions. -+ -This configuration property defines the database schema used for storing the temporary tables used for bulk HQL operations. - -`*hibernate.hql.bulk_id_strategy.persistent.catalog*` (e.g. Database catalog name. By default, the `hibernate.default_catalog` is used.):: -This configuration property is used by the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/hql/spi/id/persistent/PersistentTableBulkIdStrategy.html[`PersistentTableBulkIdStrategy`], that mimics temporary tables for databases which do not support temporary tables. -It follows a pattern similar to the ANSI SQL definition of the global temporary table using a "session id" column to segment rows from the various sessions. 
-+ -This configuration property defines the database catalog used for storing the temporary tables used for bulk HQL operations. - -[[configurations-batch]] -=== Batching properties - -`*hibernate.jdbc.batch_size*` (e.g. 5):: -Maximum JDBC batch size. A nonzero value enables batch updates. - -`*hibernate.order_inserts*` (e.g. `true` or `false` (default value)):: -Forces Hibernate to order SQL inserts by the primary key value of the items being inserted. This preserves batching when using cascading. - -`*hibernate.order_updates*` (e.g. `true` or `false` (default value)):: -Forces Hibernate to order SQL updates by the primary key value of the items being updated. This preserves batching when using cascading and reduces the likelihood of transaction deadlocks in highly-concurrent systems. - -`*hibernate.jdbc.batch_versioned_data*` (e.g. `true` (default value) or `false`):: -Should versioned entities be included in batching? -+ -Set this property to `true` if your JDBC driver returns correct row counts from executeBatch(). This option is usually safe, and is enabled by default. If enabled, Hibernate uses batched DML for automatically versioned data. - -`*hibernate.batch_fetch_style*` (e.g. `LEGACY` (default value)):: -Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] to use. -+ -Can specify either the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] name (case insensitively), or a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/loader/BatchFetchStyle.html[`BatchFetchStyle`] instance. `LEGACY` is the default value. - -`*hibernate.jdbc.batch.builder*` (e.g. 
the fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation class type or an actual object instance):: - Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/batch/spi/BatchBuilder.html[`BatchBuilder`] implementation to use. - -[[configurations-database-fetch]] -==== Fetching properties - -`*hibernate.max_fetch_depth*` (e.g. a value between `0` and `3`):: -Sets a maximum depth for the outer join fetch tree for single-ended associations. A single-ended association is a one-to-one or many-to-one association. A value of `0` disables default outer join fetching. - -`*hibernate.default_batch_fetch_size*` (e.g. `4`,`8`, or `16`):: -The default size for Hibernate Batch fetching of associations (lazily fetched associations can be fetched in batches to prevent N+1 query problems). - -`*hibernate.jdbc.fetch_size*` (e.g. `0` or an integer):: -A non-zero value determines the JDBC fetch size, by calling `Statement.setFetchSize()`. - -`*hibernate.jdbc.use_scrollable_resultset*` (e.g. `true` or `false`):: -Enables Hibernate to use JDBC2 scrollable resultsets. This property is only relevant for user-supplied JDBC connections. Otherwise, Hibernate uses connection metadata. - -`*hibernate.jdbc.use_get_generated_keys*` (e.g. `true` or `false`):: -Allows Hibernate to use JDBC3 `PreparedStatement.getGeneratedKeys()` to retrieve natively-generated keys after insert. You need the JDBC3+ driver and JRE1.4+. Disable this property if your driver has problems with the Hibernate identifier generators. By default, it tries to detect the driver capabilities from connection metadata. - -`*hibernate.enable_lazy_load_no_trans*` (e.g. 
`true` or `false` (default value)):: -Allows a detached proxy or lazy collection to be fetched even when not associated with an open session / persistence context, by creating a temporary persistence context when the proxy or collection is accessed. -+ -Enabling this setting can make `LazyInitializationException` go away, but it's much better to use a fetch plan to ensure that needed associations are fully initialized before the session is closed. -+ -IMPORTANT: This setting is not recommended, since it can easily break transaction isolation or lead to data aliasing. - -[[configurations-logging]] -=== Statement logging and statistics - -==== SQL statement logging - -`*hibernate.show_sql*` (e.g. `true` or `false` (default value)):: -Write all SQL statements to the console. This is an alternative to setting the log category `org.hibernate.SQL` to debug. - -`*hibernate.format_sql*` (e.g. `true` or `false` (default value)):: -Pretty-print the SQL in the log and console. - -`*hibernate.highlight_sql*` (e.g. `true` or `false` (default value)):: -Colorize the SQL in the console using ANSI escape codes. - -`*hibernate.use_sql_comments*` (e.g. `true` or `false` (default value)):: -If true, Hibernate generates comments inside the SQL, for easier debugging. - -==== Statistics settings - -`*hibernate.generate_statistics*` (e.g. `true` or `false`):: -Causes Hibernate to collect statistics for performance tuning. - -`*hibernate.stats.factory*` (e.g. the fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/stat/spi/StatisticsFactory.html[`StatisticsFactory`] implementation or an actual instance):: -The `StatisticsFactory` allow you to customize how the Hibernate Statistics are being collected. - -`*hibernate.session.events.log*` (e.g. 
`true` or `false`):: -A setting to control whether the `org.hibernate.engine.internal.StatisticalLoggingSessionEventListener` is enabled on all `Sessions` (unless explicitly disabled for a given `Session`). -+ -The default value of this setting is determined by the value for `hibernate.generate_statistics`, meaning that if statistics are enabled, then logging of Session metrics is enabled by default too. - -[[configurations-cache]] -=== Cache Properties - -`*hibernate.cache.region.factory_class*` (e.g. `jcache`):: -Either a shortcut name (e.g. `jcache`, `ehcache`) or the fully-qualified name of the `RegionFactory` implementation class. - -`*hibernate.cache.default_cache_concurrency_strategy*`:: -Setting used to give the name of the default https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/annotations/CacheConcurrencyStrategy.html[`CacheConcurrencyStrategy`] to use -when either `@jakarta.persistence.Cacheable` or `@org.hibernate.annotations.Cache` is used. `@org.hibernate.annotations.Cache` is used to override the global setting. - -`*hibernate.cache.use_minimal_puts*` (e.g. `true` (default value) or `false`):: -Optimizes second-level cache operation to minimize writes, at the cost of more frequent reads. This is most useful for clustered caches and is enabled by default for clustered cache implementations. - -`*hibernate.cache.use_query_cache*` (e.g. `true` or `false` (default value)):: -Enables the query cache. You still need to set individual queries to be cacheable. - -`*hibernate.cache.use_second_level_cache*` (e.g. `true` (default value) or `false`):: -Enable/disable the second-level cache, which is enabled by default, although the default `RegionFactory` is `NoCachingRegionFactory` (meaning there is no actual caching implementation). - -`*hibernate.cache.query_cache_factory*` (e.g. 
fully-qualified class name):: -A custom implementation of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/cache/spi/TimestampsCacheFactory.html[`TimestampsCacheFactory`] interface. The default is the built-in `StandardTimestampsCacheFactory`. - -`*hibernate.cache.region_prefix*` (e.g. A string):: -A prefix for second-level cache region names. - -`*hibernate.cache.use_structured_entries*` (e.g. `true` or `false` (default value)):: -Forces Hibernate to store data in the second-level cache in a more human-readable format. - -`*hibernate.cache.auto_evict_collection_cache*` (e.g. `true` or `false` (default: false)):: -Enables the automatic eviction of a bi-directional association's collection cache when an element in the `ManyToOne` collection is added/updated/removed without properly managing the change on the `OneToMany` side. - -`*hibernate.cache.use_reference_entries*` (e.g. `true` or `false`):: -Optimizes second-level cache operation to store immutable entities (aka "reference") which do not have associations into cache directly. In this case, disassembling and deep copy operations can be avoided. The default value of this property is `false`. - -`*hibernate.classcache*` (e.g. `hibernate.classcache.org.hibernate.ejb.test.Item` = `read-write`):: -Sets the associated entity class cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.classcache.<fully.qualified.Classname> = usage[, region]` where usage is the cache strategy used and region the cache region name. - -`*hibernate.collectioncache*` (e.g. `hibernate.collectioncache.org.hibernate.ejb.test.Item.distributors` = `read-write, RegionName`):: -Sets the associated collection cache concurrency strategy for the designated region. Caching configuration should follow the following pattern `hibernate.collectioncache.<fully.qualified.Classname>.<role> = usage[, region]` where usage is the cache strategy used and region the cache region name. 
- -[[configurations-infinispan]] -=== Infinispan properties - -For more details about how to customize the Infinispan second-level cache provider, check out the -https://infinispan.org/docs/stable/titles/integrating/integrating.html#configuration_properties[Infinispan User Guide]. - -[[configurations-transactions]] -=== Transactions properties - -`*hibernate.transaction.jta.platform*` (e.g. `JBossAS`, `BitronixJtaPlatform`):: -Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation to use for integrating with JTA systems. -Can reference either a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] instance or the name of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatform.html[`JtaPlatform`] implementation class. - -`*hibernate.jta.prefer_user_transaction*` (e.g. `true` or `false` (default value)):: -Should we prefer using the `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveUserTransaction` over using `org.hibernate.engine.transaction.jta.platform.spi.JtaPlatform#retrieveTransactionManager`? - -`*hibernate.transaction.jta.platform_resolver*`:: -Names the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/transaction/jta/platform/spi/JtaPlatformResolver.html[`JtaPlatformResolver`] implementation to use. - -`*hibernate.jta.cacheTransactionManager*` (e.g. `true` (default value) or `false`):: -A configuration value key used to indicate that it is safe to cache. - -`*hibernate.jta.cacheUserTransaction*` (e.g. `true` or `false` (default value)):: -A configuration value key used to indicate that it is safe to cache. - -`*hibernate.transaction.flush_before_completion*` (e.g. 
`true` or `false` (default value)):: -Causes the session to be flushed during the before completion phase of the transaction. If possible, use built-in and automatic session context management instead. - -`*hibernate.transaction.auto_close_session*` (e.g. `true` or `false` (default value)):: -Causes the session to be closed during the after completion phase of the transaction. If possible, use built-in and automatic session context management instead. - -`*hibernate.transaction.coordinator_class*`:: -Names the implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/spi/TransactionCoordinatorBuilder.html[`TransactionCoordinatorBuilder`] to use for creating https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/spi/TransactionCoordinator.html[`TransactionCoordinator`] instances. -+ -Can be a `TransactionCoordinatorBuilder` instance, `TransactionCoordinatorBuilder` implementation `Class` reference, a `TransactionCoordinatorBuilder` implementation class name (fully-qualified name) or a short name. -+ -The following short names are defined for this setting: -+ -`jdbc`::: Manages transactions via calls to `java.sql.Connection` (default for non-Jakarta Persistence applications). -`jta`::: Manages transactions via JTA. See <>. -+ - -If a Jakarta Persistence application does not provide a setting for `hibernate.transaction.coordinator_class`, Hibernate will -automatically build the proper transaction coordinator based on the transaction type for the persistence unit. -+ -If a non-Jakarta Persistence application does not provide a setting for `hibernate.transaction.coordinator_class`, Hibernate -will use `jdbc` as the default. This default will cause problems if the application actually uses JTA-based transactions. 
-A non-Jakarta Persistence application that uses JTA-based transactions should explicitly set `hibernate.transaction.coordinator_class=jta` -or provide a custom https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/TransactionCoordinatorBuilder.html[`TransactionCoordinatorBuilder`] that builds a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/resource/transaction/TransactionCoordinator.html[`TransactionCoordinator`] that properly coordinates with JTA-based transactions. - -`*hibernate.jta.track_by_thread*` (e.g. `true` (default value) or `false`):: -A transaction can be rolled back by another thread ("tracking by thread") and not the original application. -Examples of this include a JTA transaction timeout handled by a background reaper thread. -+ -The ability to handle this situation requires checking the Thread ID every time Session is called, so enabling this can certainly have a performance impact. - -[line-through]#`*hibernate.transaction.factory_class*`#:: -+ -WARNING: This is a legacy setting that's been deprecated and you should use the `hibernate.transaction.jta.platform` instead. - -`*hibernate.jta.allowTransactionAccess*`(e.g. `true` (default value) or `false`):: -It allows access to the underlying `org.hibernate.Transaction` even when using JTA -since the Jakarta Persistence specification prohibits this behavior. -+ -If this configuration property is set to `true`, access is granted to the underlying `org.hibernate.Transaction`. -If it's set to `false`, you won't be able to access the `org.hibernate.Transaction`. -+ -The default behavior is to allow access unless the `Session` is bootstrapped via Jakarta Persistence. - -[[configurations-multi-tenancy]] -=== Multi-tenancy settings - -`*hibernate.multiTenancy*` (e.g. `NONE` (default value), `SCHEMA`, `DATABASE`, and `DISCRIMINATOR` (not implemented yet)):: -The multi-tenancy strategy in use. 
- -`*hibernate.multi_tenant_connection_provider*` (e.g. `true` or `false` (default value)):: -Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/connections/spi/MultiTenantConnectionProvider.html[`MultiTenantConnectionProvider`] implementation to use. As `MultiTenantConnectionProvider` is also a service, can be configured directly through the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/registry/StandardServiceRegistryBuilder.html[`StandardServiceRegistryBuilder`]. - -`*hibernate.tenant_identifier_resolver*`:: -Names a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/context/spi/CurrentTenantIdentifierResolver.html[`CurrentTenantIdentifierResolver`] implementation to resolve the current tenant identifier so that calling `SessionFactory#openSession()` would get a `Session` that's connected to the right tenant. -+ -Can be a `CurrentTenantIdentifierResolver` instance, `CurrentTenantIdentifierResolver` implementation `Class` object reference or a `CurrentTenantIdentifierResolver` implementation class name. - -`*hibernate.multi_tenant.datasource.identifier_for_any*` (e.g. `true` or `false` (default value)):: -When the `hibernate.connection.datasource` property value is resolved to a `javax.naming.Context` object, this configuration property defines the JNDI name used to locate the `DataSource` used for fetching the initial `Connection` which is used to access the database metadata of the underlying database(s) (in situations where we do not have a tenant id, like startup processing). - -[[configurations-hbmddl]] -=== Automatic schema generation - -`*hibernate.hbm2ddl.auto*` (e.g. `none` (default value), `create-only`, `drop`, `create`, `create-drop`, `validate`, and `update`):: -Setting to perform `SchemaManagementTool` actions automatically as part of the `SessionFactory` lifecycle. 
-Valid options are defined by the `externalHbm2ddlName` value of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/Action.html[`Action`] enum: -+ -`none`::: No action will be performed. -`create-only`::: Database creation will be generated. -`drop`::: Database dropping will be generated. -`create`::: Database dropping will be generated followed by database creation. -`create-drop`::: Drop the schema and recreate it on SessionFactory startup. Additionally, drop the schema on SessionFactory shutdown. -`validate`::: Validate the database schema. -`update`::: Update the database schema. - -`*jakarta.persistence.schema-generation.database.action*` (e.g. `none` (default value), `create-only`, `drop`, `create`, `create-drop`, `validate`, and `update`):: -Setting to perform `SchemaManagementTool` actions automatically as part of the `SessionFactory` lifecycle. -Valid options are defined by the `externalJpaName` value of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/Action.html[`Action`] enum: -+ -`none`::: No action will be performed. -`create`::: Database creation will be generated. -`drop`::: Database dropping will be generated. -`drop-and-create`::: Database dropping will be generated followed by database creation. - -`*jakarta.persistence.schema-generation.scripts.action*` (e.g. `none` (default value), `create-only`, `drop`, `create`, `create-drop`, `validate`, and `update`):: -Setting to perform `SchemaManagementTool` actions writing the commands into a DDL script file. -Valid options are defined by the `externalJpaName` value of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/Action.html[`Action`] enum: -+ -`none`::: No action will be performed. -`create`::: Database creation will be generated. -`drop`::: Database dropping will be generated. 
-`drop-and-create`::: Database dropping will be generated followed by database creation. - -`*jakarta.persistence.schema-generation-connection*`:: -Allows passing a specific `java.sql.Connection` instance to be used by `SchemaManagementTool`. - -`*jakarta.persistence.database-product-name*`:: -Specifies the name of the database provider in cases where a Connection to the underlying database is not available (aka, mainly in generating scripts). -In such cases, a value for this setting _must_ be specified. -+ -The value of this setting is expected to match the value returned by `java.sql.DatabaseMetaData#getDatabaseProductName()` for the target database. -+ -Additionally, specifying `jakarta.persistence.database-major-version` and/or `jakarta.persistence.database-minor-version` may be required to understand exactly how to generate the required schema commands. - -`*jakarta.persistence.database-major-version*`:: -Specifies the major version of the underlying database, as would be returned by `java.sql.DatabaseMetaData#getDatabaseMajorVersion` for the target database. -+ -This value is used to help more precisely determine how to perform schema generation tasks for the underlying database in cases where `jakarta.persistence.database-product-name` does not provide enough distinction. - -`*jakarta.persistence.database-minor-version*`:: -Specifies the minor version of the underlying database, as would be returned by `java.sql.DatabaseMetaData#getDatabaseMinorVersion` for the target database. -+ -This value is used to help more precisely determine how to perform schema generation tasks for the underlying database in cases where `jakarta.persistence.database-product-name` and `jakarta.persistence.database-major-version` does not provide enough distinction. 
- -`*jakarta.persistence.schema-generation.create-source*`:: -Specifies whether schema generation commands for schema creation are to be determined based on object/relational mapping metadata, DDL scripts, or a combination of the two. -See https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/SourceType.html[`SourceType`] for valid set of values. -+ -If no value is specified, a default is assumed as follows: -+ -* if source scripts are specified (per `jakarta.persistence.schema-generation.create-script-source`), then `script` is assumed -* otherwise, `metadata` is assumed - -`*jakarta.persistence.schema-generation.drop-source*`:: -Specifies whether schema generation commands for schema dropping are to be determined based on object/relational mapping metadata, DDL scripts, or a combination of the two. -See https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/SourceType.html[`SourceType`] for valid set of values. -+ -If no value is specified, a default is assumed as follows: -+ -* if source scripts are specified (per `jakarta.persistence.schema-generation.drop-script-source`), then the `script` option is assumed -* otherwise, `metadata` is assumed - -`*jakarta.persistence.schema-generation.create-script-source*`:: -Specifies the `create` script file as either a `java.io.Reader` configured for reading of the DDL script file or a string designating a file `java.net.URL` for the DDL script. -+ -Hibernate historically also accepted `hibernate.hbm2ddl.import_files` for a similar purpose, but `jakarta.persistence.schema-generation.create-script-source` should be preferred over `hibernate.hbm2ddl.import_files`. - -`*jakarta.persistence.schema-generation.drop-script-source*`:: - Specifies the `drop` script file as either a `java.io.Reader` configured for reading of the DDL script file or a string designating a file `java.net.URL` for the DDL script. 
- -`*jakarta.persistence.schema-generation.scripts.create-target*`:: -For cases where the `jakarta.persistence.schema-generation.scripts.action` value indicates that schema creation commands should be written to DDL script file, `jakarta.persistence.schema-generation.scripts.create-target` specifies either a `java.io.Writer` configured for output of the DDL script or a string specifying the file URL for the DDL script. - -`*jakarta.persistence.schema-generation.scripts.drop-target*`:: -For cases where the `jakarta.persistence.schema-generation.scripts.action` value indicates that schema dropping commands should be written to DDL script file, `jakarta.persistence.schema-generation.scripts.drop-target` specifies either a `java.io.Writer` configured for output of the DDL script or a string specifying the file URL for the DDL script. - -`*hibernate.hbm2ddl.schema-generation.script.append*` (e.g. `true` (default value) or `false`):: -For cases where the `jakarta.persistence.schema-generation.scripts.action` value indicates that schema commands should be written to DDL script file, `hibernate.hbm2ddl.schema-generation.script.append` specifies if schema commands should be appended to the end of the file rather than written at the beginning of the file. -Values are `true` for appending schema commands to the end of the file, `false` for writing achema commands at the beginning of the file. - -`*jakarta.persistence.hibernate.hbm2ddl.import_files*` (e.g. `import.sql` (default value)):: -Comma-separated names of the optional files containing SQL DML statements executed during the `SessionFactory` creation. -File order matters, the statements of a given file are executed before the statements of the following one. -+ -These statements are only executed if the schema is created, meaning that `hibernate.hbm2ddl.auto` is set to `create`, `create-drop`, or `update`. 
-`jakarta.persistence.schema-generation.create-script-source` / `jakarta.persistence.schema-generation.drop-script-source` should be preferred. - -`*jakarta.persistence.sql-load-script-source*`:: -Jakarta Persistence variant of `hibernate.hbm2ddl.import_files`. Specifies a `java.io.Reader` configured for reading of the SQL load script or a string designating the file `java.net.URL` for the SQL load script. -A "SQL load script" is a script that performs some database initialization (INSERT, etc). - -`*hibernate.hbm2ddl.import_files_sql_extractor*`:: -Reference to the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/hbm2ddl/ImportSqlCommandExtractor.html[`ImportSqlCommandExtractor`] implementation class to use for parsing source/import files as defined by `jakarta.persistence.schema-generation.create-script-source`, -`jakarta.persistence.schema-generation.drop-script-source` or `hibernate.hbm2ddl.import_files`. -+ -Reference may refer to an instance, a Class implementing `ImportSqlCommandExtractor` or the fully-qualified name of the `ImportSqlCommandExtractor` implementation. -If the fully-qualified name is given, the implementation must provide a no-arg constructor. -+ -The default value is https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/hbm2ddl/SingleLineSqlCommandExtractor.html[`SingleLineSqlCommandExtractor`]. - -`*hibernate.hbm2ddl.create_namespaces*` (e.g. `true` or `false` (default value)):: -Specifies whether to automatically create the database schema/catalog also. - -`*jakarta.persistence.create-database-schemas*` (e.g. `true` or `false` (default value)):: -The Jakarta Persistence variant of `hibernate.hbm2ddl.create_namespaces`. Specifies whether the persistence provider is to create the database schema(s) in addition to creating database objects (tables, sequences, constraints, etc). 
-The value of this boolean property should be set to `true` if the persistence provider is to create schemas in the database or to generate DDL that contains "CREATE SCHEMA" commands. -+ -If this property is not supplied (or is explicitly `false`), the provider should not attempt to create database schemas. - -`*hibernate.hbm2ddl.schema_filter_provider*`:: -Used to specify the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaFilterProvider.html[`SchemaFilterProvider`] to be used by `create`, `drop`, `migrate`, and `validate` operations on the database schema. -`SchemaFilterProvider` provides filters that can be used to limit the scope of these operations to specific namespaces, tables and sequences. All objects are included by default. - -`*hibernate.hbm2ddl.jdbc_metadata_extraction_strategy*` (e.g. `grouped` (default value) or `individually`):: -Setting to choose the strategy used to access the JDBC Metadata. -Valid options are defined by the `strategy` value of the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/JdbcMetadaAccessStrategy.html[`JdbcMetadaAccessStrategy`] enum: -+ -`grouped`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute a single `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call to retrieve all the database table in order to determine if all the ``jakarta.persistence.Entity``s have a corresponding mapped database tables. This strategy may require `hibernate.default_schema` and/or `hibernate.default_catalog` to be provided. 
-`individually`::: https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaMigrator.html[`SchemaMigrator`] and https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/schema/spi/SchemaValidator.html[`SchemaValidator`] execute one `java.sql.DatabaseMetaData#getTables(String, String, String, String[])` call for each `jakarta.persistence.Entity` in order to determine if a corresponding database table exists. - -`*hibernate.hbm2ddl.delimiter*` (e.g. `;`):: -Identifies the delimiter to use to separate schema management statements in script outputs. -The default value is `;`. - -`*hibernate.schema_management_tool*` (e.g. A schema name):: -Used to specify the `SchemaManagementTool` to use for performing schema management. The default is to use `HibernateSchemaManagementTool`. - -`*hibernate.synonyms*` (e.g. `true` or `false` (default value)):: -If enabled, allows schema update and validation to support synonyms. Due to the possibility that this would return duplicate tables (especially in Oracle), this is disabled by default. - -`*hibernate.hbm2ddl.extra_physical_table_types*` (e.g. `BASE TABLE`):: -Identifies a comma-separated list of values to specify extra table types, other than the default `TABLE` value, to recognize as defining a physical table by schema update, creation and validation. - -`*hibernate.hbm2ddl.default_constraint_mode*` (`CONSTRAINT` (default value) or `NO_CONSTRAINT`):: -Default `jakarta.persistence.ConstraintMode` for foreign key mapping if `PROVIDER_DEFAULT` strategy used. - -`*hibernate.schema_update.unique_constraint_strategy*` (e.g. `DROP_RECREATE_QUIETLY`, `RECREATE_QUIETLY`, `SKIP`):: -Unique columns and unique keys both use unique constraints in most dialects. -`SchemaUpdate` needs to create these constraints, but DBs support for finding existing constraints is extremely inconsistent. -Further, non-explicitly-named unique constraints use randomly generated characters. 
-+ -Therefore, the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/tool/hbm2ddl/UniqueConstraintSchemaUpdateStrategy.html[`UniqueConstraintSchemaUpdateStrategy`] offers the following options: -+ -`DROP_RECREATE_QUIETLY`::: Default option. -Attempt to drop, then (re-)create each unique constraint. Ignore any exceptions being thrown. -`RECREATE_QUIETLY`::: -Attempts to (re-)create unique constraints, ignoring exceptions thrown if the constraint already existed. -`SKIP`::: -Does not attempt to create unique constraints on a schema update. - -`*hibernate.hbm2ddl.charset_name*` (e.g. `Charset.defaultCharset()`):: -Defines the charset (encoding) used for all input/output schema generation resources. By default, Hibernate uses the default charset given by `Charset.defaultCharset()`. This configuration property allows you to override the default JVM setting so that you can specify which encoding is used when reading and writing schema generation resources (e.g. File, URL). - -`*hibernate.hbm2ddl.halt_on_error*` (e.g. `true` or `false` (default value)):: -Whether the schema migration tool should halt on error, therefore terminating the bootstrap process. By default, the `EntityManagerFactory` or `SessionFactory` are created even if the schema migration throws exceptions. To prevent this default behavior, set this property value to `true`. - -[[configurations-session-events]] -=== Session events - -`*hibernate.session.events.auto*`:: -Fully qualified class name implementing the `SessionEventListener` interface. - -`*hibernate.session_factory.interceptor*` (e.g. `org.hibernate.EmptyInterceptor` (default value)):: -Names an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/Interceptor[`Interceptor`] implementation to be applied to every `Session` created by the current `org.hibernate.SessionFactory`. 
-+ -Can reference: -+ -* `Interceptor` instance -* `Interceptor` implementation `Class` object reference -* `Interceptor` implementation class name - -[line-through]#`*hibernate.ejb.interceptor*`# (e.g. `hibernate.session_factory.interceptor` (default value)):: -+ -WARNING: Deprecated setting. Use `hibernate.session_factory.session_scoped_interceptor` instead. - -`*hibernate.session_factory.session_scoped_interceptor*` (e.g. fully-qualified class name or class reference):: -Names an `org.hibernate.Interceptor` implementation to be applied to the `org.hibernate.SessionFactory` and propagated to each `Session` created from the `SessionFactory`. -+ -This setting identifies an `Interceptor` implementation that is to be applied to every `Session` opened from the `SessionFactory`, -but unlike `hibernate.session_factory.interceptor`, a unique instance of the `Interceptor` is -used for each `Session`. -+ -Can reference: -+ -* `Interceptor` instance -* `Interceptor` implementation `Class` object reference -* `java.util.function.Supplier` instance which is used to retrieve the `Interceptor` instance -+ -NOTE: Specifically, this setting cannot name an `Interceptor` instance. - -[line-through]#`*hibernate.ejb.interceptor.session_scoped*`# (e.g. fully-qualified class name or class reference):: -+ -WARNING: Deprecated setting. Use `hibernate.session_factory.session_scoped_interceptor` instead. -+ -An optional Hibernate interceptor. -+ -The interceptor instance is specific to a given Session instance (and hence is not thread-safe) has to implement `org.hibernate.Interceptor` and have a no-arg constructor. -+ -This property cannot be combined with `hibernate.ejb.interceptor`. - -`*hibernate.ejb.session_factory_observer*` (e.g. fully-qualified class name or class reference):: -Specifies a `SessionFactoryObserver` to be applied to the SessionFactory. The class must have a no-arg constructor. - -`*hibernate.ejb.event*` (e.g. 
`hibernate.ejb.event.pre-load` = `com.acme.SecurityListener,com.acme.AuditListener`):: -Event listener list for a given event type. The list of event listeners is a comma separated fully qualified class name list. - -[[configurations-classloader]] -=== ClassLoaders property - -`*hibernate.classLoaders*`:: -Used to define a `java.util.Collection` or the `ClassLoader` instance Hibernate should use for class-loading and resource-lookups. - -[[configurations-bootstrap]] -=== Bootstrap properties - -`*hibernate.integrator_provider*` (e.g. The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/IntegratorProvider.html[`IntegratorProvider`]):: -Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/integrator/spi/Integrator.html[`Integrator`] which is used during the bootstrap process to integrate various services. - -`*hibernate.strategy_registration_provider*` (e.g. The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/StrategyRegistrationProviderList.html[`StrategyRegistrationProviderList`]):: -Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/registry/selector/StrategyRegistrationProvider.html[`StrategyRegistrationProvider`] which is used during the bootstrap process to provide registrations of strategy selector(s). - -`*hibernate.type_contributors*` (e.g. The fully qualified name of an https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/boot/spi/TypeContributorList.html[`TypeContributorList`]):: -Used to define a list of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/model/TypeContributor.html[`TypeContributor`] which is used during the bootstrap process to contribute types. - -`*hibernate.persister.resolver*` (e.g. 
The fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/persister/spi/PersisterClassResolver.html[`PersisterClassResolver`] or a `PersisterClassResolver` instance):: -Used to define an implementation of the `PersisterClassResolver` interface which can be used to customize how an entity or a collection is being persisted. - -`*hibernate.persister.factory*` (e.g. The fully qualified name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/persister/spi/PersisterFactory.html[`PersisterFactory`] or a `PersisterFactory` instance):: -Like a `PersisterClassResolver`, the `PersisterFactory` can be used to customize how an entity or a collection are being persisted. - -`*hibernate.service.allow_crawling*` (e.g. `true` (default value) or `false`):: -Crawl all available service bindings for an alternate registration of a given Hibernate `Service`. - -`*hibernate.metadata_builder_contributor*` (e.g. The instance, the class or the fully qualified class name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/spi/MetadataBuilderContributor.html[`MetadataBuilderContributor`]):: -Used to define an instance, the class or the fully qualified class name of a https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/boot/spi/MetadataBuilderContributor.html[`MetadataBuilderContributor`] which can be used to configure the `MetadataBuilder` when bootstrapping via the Jakarta Persistence `EntityManagerFactory`. 
- -[[configurations-misc]] -=== Miscellaneous properties - -`*hibernate.dialect_resolvers*`:: -Names any additional https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectResolver.html[`DialectResolver`] implementations to register with the standard https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/engine/jdbc/dialect/spi/DialectFactory.html[`DialectFactory`]. - -`*hibernate.session_factory_name*` (e.g. A JNDI name):: -Setting used to name the Hibernate `SessionFactory`. -Naming the `SessionFactory` allows for it to be properly serialized across JVMs as long as the same name is used on each JVM. -+ -If `hibernate.session_factory_name_is_jndi` is set to `true`, this is also the name under which the `SessionFactory` is bound into JNDI on startup and from which it can be obtained from JNDI. - -`*hibernate.session_factory_name_is_jndi*` (e.g. `true` (default value) or `false`):: -Does the value defined by `hibernate.session_factory_name` represent a JNDI namespace into which the `org.hibernate.SessionFactory` should be bound and made accessible? -+ -Defaults to `true` for backward compatibility. Set this to `false` if naming a SessionFactory is needed for serialization purposes, but no writable JNDI context exists in the runtime environment or if the user simply does not want JNDI to be used. - -`*hibernate.ejb.entitymanager_factory_name*` (e.g. By default, the persistence unit name is used, otherwise a randomly generated UUID):: -Internally, Hibernate keeps track of all `EntityManagerFactory` instances using the `EntityManagerFactoryRegistry`. The name is used as a key to identify a given `EntityManagerFactory` reference. - -`*hibernate.ejb.cfgfile*` (e.g. `hibernate.cfg.xml` (default value)):: -XML configuration file to use to configure Hibernate. - -`*hibernate.ejb.discard_pc_on_close*` (e.g. 
`true` or `false` (default value)):: -If true, the persistence context will be discarded (think `clear()` when the method is called). -Otherwise, the persistence context will stay alive till the transaction completion: all objects will remain managed, and any change will be synchronized with the database (default to false, ie wait for transaction completion). - -`*hibernate.ejb.metamodel.population*` (e.g. `enabled` or `disabled`, or `ignoreUnsupported` (default value)):: -Setting that indicates whether to build the Jakarta Persistence types. -+ -Accepts three values: -+ -enabled::: Do the build. -disabled::: Do not do the build. -ignoreUnsupported::: Do the build, but ignore any non-Jakarta Persistence features that would otherwise result in a failure (e.g. `@Any` annotation). - -`*hibernate.jpa.static_metamodel.population*` (e.g. `enabled` or `disabled`, or `skipUnsupported` (default value)):: -Setting that controls whether we seek out Jakarta Persistence _static metamodel_ classes and populate them. -+ -Accepts three values: -+ -enabled::: Do the population. -disabled::: Do not do the population. -skipUnsupported::: Do the population, but ignore any non-Jakarta Persistence features that would otherwise result in the population failing (e.g. `@Any` annotation). - -`*hibernate.delay_cdi_access*` (e.g. `true` or `false` (default value)):: -Defines delayed access to CDI `BeanManager`. Starting in 5.1 the preferred means for CDI bootstrapping is through https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/jpa/event/spi/jpa/ExtendedBeanManager.html[`ExtendedBeanManager`]. - -`*hibernate.resource.beans.container*` (e.g. fully-qualified class name):: -Identifies an explicit `org.hibernate.resource.beans.container.spi.BeanContainer` to be used. -+ -Note that, for CDI-based containers, setting this is not necessary. -Simply pass the `BeanManager` to use via `jakarta.persistence.bean.manager` and optionally specify `hibernate.delay_cdi_access`. 
-+ -This setting is more meant to integrate non-CDI bean containers such as Spring. - -`*hibernate.allow_update_outside_transaction*` (e.g. `true` or `false` (default value)):: -Setting that allows to perform update operations outside of a transaction boundary. -+ -Accepts two values: -+ -true::: allows to flush an update out of a transaction -false::: does not allow - -`*hibernate.collection_join_subquery*` (e.g. `true` (default value) or `false`):: -Setting which indicates whether or not the new JOINs over collection tables should be rewritten to subqueries. - -`*hibernate.allow_refresh_detached_entity*` (e.g. `true` (default value when using Hibernate native bootstrapping) or `false` (default value when using Jakarta Persistence bootstrapping)):: -Setting that allows to call `jakarta.persistence.EntityManager#refresh(entity)` or `Session#refresh(entity)` on a detached instance even when the `org.hibernate.Session` is obtained from a Jakarta Persistence `jakarta.persistence.EntityManager`. - -`*hibernate.use_entity_where_clause_for_collections*` (e.g., `true` (default) or `false`):: -Setting controls whether an entity's "where" clause, mapped using `@Where(clause = "...")` or `` is taken into account when loading one-to-many or many-to-many collections of that type of entity. - -`*hibernate.event.merge.entity_copy_observer*` (e.g. `disallow` (default value), `allow`, `log` (testing purpose only) or fully-qualified class name):: -Setting that specifies how Hibernate will respond when multiple representations of the same persistent entity ("entity copy") is detected while merging. -+ -The possible values are: -+ -disallow::: throws `IllegalStateException` if an entity copy is detected -allow::: performs the merge operation on each entity copy that is detected -log::: (provided for testing only) performs the merge operation on each entity copy that is detected and logs information about the entity copies. 
-This setting requires DEBUG logging be enabled for https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/event/internal/EntityCopyAllowedLoggedObserver.html[`EntityCopyAllowedLoggedObserver`]. - -In addition, the application may customize the behavior by providing an implementation of https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/event/spi/EntityCopyObserver.html[`EntityCopyObserver`] and setting `hibernate.event.merge.entity_copy_observer` to the class name. -When this property is set to `allow` or `log`, Hibernate will merge each entity copy detected while cascading the merge operation. -In the process of merging each entity copy, Hibernate will cascade the merge operation from each entity copy to its associations with `cascade = CascadeType.MERGE` or `cascade = CascadeType.ALL`. -The entity state resulting from merging an entity copy will be overwritten when another entity copy is merged. - -For more details, check out the <> section. - -[[configurations-envers]] -=== Envers properties - -`*hibernate.envers.autoRegisterListeners*` (e.g. `true` (default value) or `false`):: -When set to `false`, the Envers entity listeners are no longer auto-registered, so you need to register them manually during the bootstrap process. - -`*hibernate.integration.envers.enabled*` (e.g. `true` (default value) or `false`):: -Enable or disable the Hibernate Envers `Service` integration. - -`*hibernate.listeners.envers.autoRegister*`:: -Legacy setting. Use `hibernate.envers.autoRegisterListeners` or `hibernate.integration.envers.enabled` instead. - -[[configurations-spatial]] -=== Spatial properties - -`*hibernate.integration.spatial.enabled*` (e.g. `true` (default value) or `false`):: -Enable or disable the Hibernate Spatial `Service` integration. - -`*hibernate.spatial.connection_finder*` (e.g. 
`org.geolatte.geom.codec.db.oracle.DefaultConnectionFinder`):: -Define the fully-qualified name of class implementing the `org.geolatte.geom.codec.db.oracle.ConnectionFinder` interface. - -[[configurations-internal]] -=== Internal properties - -The following configuration properties are used internally, and you shouldn't probably have to configured them in your application. - -`*hibernate.enable_specj_proprietary_syntax*` (e.g. `true` or `false` (default value)):: -Enable or disable the SpecJ proprietary mapping syntax which differs from Jakarta Persistence specification. Used during performance testing only. - -`*hibernate.temp.use_jdbc_metadata_defaults*` (e.g. `true` (default value) or `false`):: -This setting is used to control whether we should consult the JDBC metadata to determine certain Settings default values when the database may not be available (mainly in tools usage). - -`*hibernate.connection_provider.injection_data*`:: -Connection provider settings to be injected (a `Map` instance) in the currently configured connection provider. - -`*hibernate.jandex_index*`:: -Names a Jandex `org.jboss.jandex.Index` instance to use. diff --git a/documentation/src/main/asciidoc/userguide/appendices/LegacyBasicTypeResolution.adoc b/documentation/src/main/asciidoc/userguide/appendices/LegacyBasicTypeResolution.adoc index c04727b58591..273be37a5aa8 100644 --- a/documentation/src/main/asciidoc/userguide/appendices/LegacyBasicTypeResolution.adoc +++ b/documentation/src/main/asciidoc/userguide/appendices/LegacyBasicTypeResolution.adoc @@ -1,7 +1,10 @@ -:sourcedir: ../../../../test/java/org/hibernate/userguide/mapping +:root-project-dir: ../../../../../.. 
+:core-project-dir: {root-project-dir}/hibernate-core +:sourcedir: {core-project-dir}/src/test/java/org/hibernate/orm/test/mapping :extrasdir: extras :originalextrasdir: ../chapters/domain/extras +[appendix] [[basic-legacy]] == Legacy BasicType resolution diff --git a/documentation/src/main/asciidoc/userguide/appendices/Legacy_DomainModel.adoc b/documentation/src/main/asciidoc/userguide/appendices/Legacy_DomainModel.adoc deleted file mode 100644 index 43f92dc654d5..000000000000 --- a/documentation/src/main/asciidoc/userguide/appendices/Legacy_DomainModel.adoc +++ /dev/null @@ -1,49 +0,0 @@ -[[appendix-legacy-domain-model]] -== Legacy Domain Model -:sourcedir: extras - -.Declaring a version property in `hbm.xml` -==== -[source,xml] ----- -include::{sourcedir}/version_property.xml[] ----- -==== - -[cols=",",] -|======================================================================= -|column |The name of the column holding the version number. Optional, defaults to the property name. -|name |The name of a property of the persistent class. -|type |The type of the version number. Optional, defaults to `integer`. -|access |Hibernate's strategy for accessing the property value. Optional, defaults to `property`. -|unsaved-value |Indicates that an instance is newly instantiated and thus unsaved. -This distinguishes it from detached instances that were saved or loaded in a previous session. -The default value, `undefined`, indicates that the identifier property value should be used. Optional. -|generated |Indicates that the version property value is generated by the database. Optional, defaults to `never`. -|insert |Whether or not to include the `version` column in SQL `insert` statements. -Defaults to `true`, but you can set it to `false` if the database column is defined with a default value of `0`. 
-|======================================================================= - -.The timestamp element in `hbm.xml` -==== -[source,xml] ----- -include::{sourcedir}/timestamp_version.xml[] ----- -==== - -[cols=",",] -|======================================================================= -|column |The name of the column which holds the timestamp. Optional, defaults to the property name -|name |The name of a JavaBeans style property of Java type `Date` or `Timestamp` of the persistent class. -|access |The strategy Hibernate uses to access the property value. Optional, defaults to `property`. -|unsaved-value |A version property which indicates that the instance is newly instantiated and unsaved. -This distinguishes it from detached instances that were saved or loaded in a previous session. -The default value of `undefined` indicates that Hibernate uses the identifier property value. -|source |Whether Hibernate retrieves the timestamp from the database or the current JVM. -Database-based timestamps incur an overhead because Hibernate needs to query the database each time to determine the incremental next value. -However, database-derived timestamps are safer to use in a clustered environment. -Not all database dialects are known to support the retrieval of the database's current timestamp. -Others may also be unsafe for locking because of lack of precision. -|generated |Whether the timestamp property value is generated by the database. Optional, defaults to `never`. 
-|======================================================================= \ No newline at end of file diff --git a/documentation/src/main/asciidoc/userguide/appendices/Legacy_Native_Queries.adoc b/documentation/src/main/asciidoc/userguide/appendices/Legacy_Native_Queries.adoc index af1962e1969e..ea446867c7f1 100644 --- a/documentation/src/main/asciidoc/userguide/appendices/Legacy_Native_Queries.adoc +++ b/documentation/src/main/asciidoc/userguide/appendices/Legacy_Native_Queries.adoc @@ -1,3 +1,4 @@ +[appendix] [[appendix-legacy-native-queries]] == Legacy Hibernate Native Queries diff --git a/documentation/src/main/asciidoc/userguide/appendices/SettingsReference.adoc b/documentation/src/main/asciidoc/userguide/appendices/SettingsReference.adoc new file mode 100644 index 000000000000..f7622db9f2ff --- /dev/null +++ b/documentation/src/main/asciidoc/userguide/appendices/SettingsReference.adoc @@ -0,0 +1,27 @@ +[appendix] +[[settings]] +== Configuration Settings + +Configuration settings can be broadly broken down into 3 categories - + +Jakarta Persistence:: + Settings which are standardized by the Jakarta Persistence specification for configuring any persistence provider. These + settings are defined by the `jakarta.persistence.` namespace +Hibernate:: + Hibernate-specific settings which control various Hibernate behaviors which are extensions to or outside the scope + of the Jakarta Persistence specification. These settings are defined by the `hibernate.` namespace +Legacy JPA:: + Settings which were standardized by Java Persistence, the legacy version of the Jakarta Persistence specification + (prior to version 3.1). These settings are defined by the `javax.persistence.` namespace + +[NOTE] +==== +For the time being, Hibernate continues to support the legacy Java Persistence settings in addition to +the Jakarta Persistence forms. 
Applications should strongly consider migrating to the new Jakarta Persistence forms +as support for the legacy Java Persistence settings will likely be removed at some point. + +For (legacy) Hibernate settings which have a direct Jakarta Persistence corollary, the Jakarta Persistence +form should be preferred - e.g. `hibernate.connection.driver_class` -> `jakarta.persistence.jdbc.driver`. +==== + +include::../../../../../target/asciidoc/fragments/config-settings.adoc[] diff --git a/documentation/src/main/asciidoc/userguide/chapters/batch/Batching.adoc b/documentation/src/main/asciidoc/userguide/chapters/batch/Batching.adoc index 9a65d6e19fc5..2154de949fd4 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/batch/Batching.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/batch/Batching.adoc @@ -1,7 +1,9 @@ [[batch]] == Batching -:sourcedir: ../../../../../test/java/org/hibernate/userguide/batch -:bulkid-sourcedir: ../../../../../../../hibernate-core/src/test/java/org/hibernate/orm/test/bulkid +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-doc-batch: {core-project-dir}/src/test/java/org/hibernate/orm/test/batch +:example-dir-bulkid: {core-project-dir}/src/test/java/org/hibernate/orm/test/bulkid :extrasdir: extras [[batch-jdbcbatch]] @@ -46,7 +48,7 @@ Since version 5.2, Hibernate allows overriding the global JDBC batch size given ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-session-jdbc-batch-size-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-session-jdbc-batch-size-example] ---- ==== @@ -60,7 +62,7 @@ The following example shows an anti-pattern for batch inserts. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-session-batch-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-session-batch-example] ---- ==== @@ -88,7 +90,7 @@ When you make new objects persistent, employ methods `flush()` and `clear()` to ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-session-batch-insert-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-session-batch-insert-example] ---- ==== @@ -103,7 +105,7 @@ In addition, use method `scroll()` to take advantage of server-side cursors for ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-session-scroll-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-session-scroll-example] ---- ==== @@ -149,7 +151,7 @@ IMPORTANT: Due to the lack of a first-level cache, stateless sessions are vulner ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-stateless-session-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-stateless-session-example] ---- ==== @@ -205,7 +207,7 @@ You can use sub-queries in the `WHERE` clause, and the sub-queries themselves ca ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-bulk-jpql-update-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-bulk-jpql-update-example] ---- ==== @@ -214,7 +216,7 @@ include::{sourcedir}/BatchTest.java[tags=batch-bulk-jpql-update-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-update-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-bulk-hql-update-example] ---- ==== @@ -226,7 +228,7 @@ You can use a versioned update to force Hibernate to reset the version or timest ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-update-version-example] 
+include::{example-dir-doc-batch}/BatchTests.java[tags=batch-bulk-hql-update-version-example] ---- ==== @@ -242,7 +244,7 @@ This feature is only available in HQL since it's not standardized by Jakarta Per ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-bulk-jpql-delete-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-bulk-jpql-delete-example] ---- ==== @@ -251,7 +253,7 @@ include::{sourcedir}/BatchTest.java[tags=batch-bulk-jpql-delete-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-delete-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-bulk-hql-delete-example] ---- ==== @@ -309,7 +311,7 @@ in which case the seed value defined by the `org.hibernate.type.descriptor.java. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchTest.java[tags=batch-bulk-hql-insert-example] +include::{example-dir-doc-batch}/BatchTests.java[tags=batch-bulk-hql-insert-example] ---- ==== @@ -344,7 +346,7 @@ The `Person` entity is the base class of this entity inheritance model, and is m ==== [source, JAVA, indent=0] ---- -include::{bulkid-sourcedir}/AbstractMutationStrategyCompositeIdTest.java[tags=batch-bulk-hql-temp-table-base-class-example] +include::{example-dir-bulkid}/AbstractMutationStrategyCompositeIdTest.java[tags=batch-bulk-hql-temp-table-base-class-example] ---- ==== @@ -355,7 +357,7 @@ Both the `Doctor` and `Engineer` entity classes extend the `Person` base class: ==== [source, JAVA, indent=0] ---- -include::{bulkid-sourcedir}/AbstractMutationStrategyIdTest.java[tags=batch-bulk-hql-temp-table-sub-classes-example] +include::{example-dir-bulkid}/AbstractMutationStrategyIdTest.java[tags=batch-bulk-hql-temp-table-sub-classes-example] ---- ==== @@ -369,7 +371,7 @@ Now, when you try to execute a bulk entity delete query: ==== [source, JAVA, indent=0] ---- 
-include::{bulkid-sourcedir}/AbstractMutationStrategyCompositeIdTest.java[tags=batch-bulk-hql-temp-table-delete-query-example] +include::{example-dir-bulkid}/AbstractMutationStrategyCompositeIdTest.java[tags=batch-bulk-hql-temp-table-delete-query-example] ---- [source, SQL, indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/beans/Beans.adoc b/documentation/src/main/asciidoc/userguide/chapters/beans/Beans.adoc index 411b86910a49..8574ce72f08f 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/beans/Beans.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/beans/Beans.adoc @@ -1,12 +1,6 @@ [[beans]] == Managed Beans -:rootProjectDir: ../../../../../../.. -:sourcedir: ../../../../../test/java/org/hibernate/userguide/beans -:coreProjectDir: {rootProjectDir}/hibernate-core -:coreTestSrcDir: {rootProjectDir}/hibernate-core/src/test/java -:instantiatorTestDir: {coreTestSrcDir}/org/hibernate/orm/test/mapping/embeddable/strategy/instantiator :extrasdir: extras -:fn-cdi-availability: footnote:disclaimer[With delayed or extended CDI availability, IdentifierGenerators cannot be resolved from CDI due to timing. See <>] Hibernate supports consuming many of its extension points as "managed beans". A bean being managed simply means that its creation and lifecycle are managed by a container of some sort. @@ -25,10 +19,11 @@ the SessionFactory. It supports a number of ways to influence how this process [[beans-manageable]] === Manageable Beans -Hibernate supports using the following integrations as managed beans: +Jakarta Persistence defines support for resolving `AttributeConverter` and +"entity listener" classes as managed beans. 
+ +Additionally, Hibernate supports resolving the following integrations as managed beans: -* `jakarta.persistence.AttributeConverter` -* Jakarta Persistence "entity listener" classes * `org.hibernate.type.descriptor.jdbc.JdbcType` * `org.hibernate.type.descriptor.java.BasicJavaType` * `org.hibernate.type.descriptor.java.MutabilityPlan` @@ -36,7 +31,10 @@ Hibernate supports using the following integrations as managed beans: * `org.hibernate.usertype.UserCollectionType` * `org.hibernate.metamodel.EmbeddableInstantiator` * `org.hibernate.envers.RevisionListener` -* `org.hibernate.id.IdentifierGenerator`{fn-cdi-availability} +* `org.hibernate.id.IdentifierGenerator` + +NOTE: At the moment, when using either <> or <> +CDI access, resolving these Hibernate integrations as managed beans is disabled. [[beans-cdi]] @@ -94,4 +92,4 @@ NOTE: When used in WildFly, this is all automatically set up by the server === Custom BeanContainer Other containers (Spring, e.g.) can also be used and integrated by implementing `BeanContainer` and -declaring it using `hibernate.resource.beans.container`. \ No newline at end of file +declaring it using `hibernate.resource.beans.container`. diff --git a/documentation/src/main/asciidoc/userguide/chapters/bootstrap/Bootstrap.adoc b/documentation/src/main/asciidoc/userguide/chapters/bootstrap/Bootstrap.adoc index df84daa08dd0..53d17d1f0ae4 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/bootstrap/Bootstrap.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/bootstrap/Bootstrap.adoc @@ -1,7 +1,10 @@ [[bootstrap]] == Bootstrap -:sourcedir: ../../../../../test/java/org/hibernate/userguide/bootstrap -:boot-spi-sourcedir: ../../../../../../../hibernate-core/src/test/java/org/hibernate/orm/test/bootstrap/spi +:root-project-dir: ../../../../../../.. 
+:core-project-dir: {root-project-dir}/hibernate-core +:core-test-base: {core-project-dir}/src/test/java/org/hibernate/orm/test +:example-dir-boot: {core-test-base}/bootstrap +:example-dir-boot-spi: {core-test-base}/bootstrap/spi :extrasdir: extras The term bootstrapping refers to initializing and starting a software component. @@ -10,7 +13,7 @@ The process is very different for each. [TIP] ==== -During the bootstrap process, you might want to customize Hibernate behavior so make sure you check the <> section as well. +During the bootstrap process, you might want to customize Hibernate behavior so make sure you check the <> section as well. ==== [[bootstrap-native]] @@ -48,7 +51,7 @@ If you wish to alter how the `BootstrapServiceRegistry` is built, that is contro ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-BootstrapServiceRegistry-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-BootstrapServiceRegistry-example] ---- ==== @@ -65,7 +68,7 @@ You will almost always need to configure the `StandardServiceRegistry`, which is ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-StandardServiceRegistryBuilder-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-StandardServiceRegistryBuilder-example] ---- ==== @@ -79,7 +82,7 @@ Some specific methods of interest: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-MetadataSources-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-bootstrap-native-registry-MetadataSources-example] ---- ==== @@ -93,7 +96,7 @@ The main use cases for an `org.hibernate.integrator.spi.Integrator` right now ar ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-event-listener-registration-example] 
+include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-event-listener-registration-example] ---- ==== @@ -112,7 +115,7 @@ Also, all methods on `MetadataSources` offer fluent-style call chaining:: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-metadata-source-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-native-metadata-source-example] ---- ==== @@ -136,7 +139,7 @@ See its https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hi ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-metadata-builder-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-native-metadata-builder-example] ---- ==== @@ -153,7 +156,7 @@ However, if you would like to adjust that building process, you will need to use ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-SessionFactory-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-native-SessionFactory-example] ---- ==== @@ -168,7 +171,7 @@ The bootstrapping API is quite flexible, but in most cases it makes the most sen ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-native-SessionFactoryBuilder-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-native-SessionFactoryBuilder-example] ---- ==== @@ -196,7 +199,7 @@ and make that available to the application for injection via the `jakarta.persis ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceUnit-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceUnit-example] ---- ==== @@ -208,7 +211,7 @@ you can inject a specific `EntityManagerFactory` by Unit name: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceUnit-configurable-example] 
+include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceUnit-configurable-example] ---- ==== @@ -231,7 +234,7 @@ The application creates an `EntityManagerFactory` by calling the `createEntityMa ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-EntityManagerFactory-example] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-jpa-compliant-EntityManagerFactory-example] ---- ==== @@ -249,7 +252,7 @@ To inject the default Persistence Context, you can use the {jpaJavadocUrlPrefix} ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceContext-example, indent=0] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceContext-example, indent=0] ---- ==== @@ -264,7 +267,7 @@ and you can even pass `EntityManager`-specific properties using the ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceContext-configurable-example, indent=0] +include::{example-dir-boot}/BootstrapTest.java[tags=bootstrap-jpa-compliant-PersistenceContext-configurable-example, indent=0] ---- ==== @@ -324,7 +327,7 @@ https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/ ==== [source, JAVA, indent=0] ---- -include::{boot-spi-sourcedir}/metadatabuildercontributor/SqlFunctionMetadataBuilderContributor.java[tags=bootstrap-jpa-compliant-MetadataBuilderContributor-example] +include::{example-dir-boot-spi}/metadatabuildercontributor/SqlFunctionMetadataBuilderContributor.java[tags=bootstrap-jpa-compliant-MetadataBuilderContributor-example] ---- ==== org.hibernate.orm.test.bootstrap.spi.metadatabuildercontributor diff --git a/documentation/src/main/asciidoc/userguide/chapters/caching/Caching.adoc b/documentation/src/main/asciidoc/userguide/chapters/caching/Caching.adoc index 4904cb220ede..74c8eec2dc00 100644 --- 
a/documentation/src/main/asciidoc/userguide/chapters/caching/Caching.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/caching/Caching.adoc @@ -1,6 +1,8 @@ [[caching]] == Caching -:sourcedir: ../../../../../test/java/org/hibernate/userguide/caching +:root-project-dir: ../../../../../../.. +:jcache-project-dir: {root-project-dir}/hibernate-jcache +:example-dir-caching: {jcache-project-dir}/src/test/java/org/hibernate/orm/test/caching At runtime, Hibernate handles moving data into and out of the second-level cache in response to the operations performed by the `Session`, which acts as a transaction-level cache of persistent data. Once an entity becomes managed, that object is added to the internal cache of the current persistence context (`EntityManager` or `Session`). @@ -167,7 +169,7 @@ Nevertheless, the reasons why we advise you to have all entities belonging to an ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/NonStrictReadWriteCacheTest.java[tags=caching-entity-mapping-example] +include::{example-dir-caching}/NonStrictReadWriteCacheTest.java[tags=caching-entity-mapping-example] ---- ==== @@ -182,7 +184,7 @@ Once an entity is stored in the second-level cache, you can avoid a database hit ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-entity-jpa-example] +include::{example-dir-caching}/SecondLevelCacheTest.java[tags=caching-entity-jpa-example] ---- ==== @@ -191,7 +193,7 @@ include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-entity-jpa-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SecondLevelCacheTest.java[tags=caching-entity-native-example] +include::{example-dir-caching}/SecondLevelCacheTest.java[tags=caching-entity-native-example] ---- ==== @@ -202,7 +204,7 @@ The Hibernate second-level cache can also load entities by their <> section. +<> section. If `@JdbcTypeCode` is used, the Dialect is still consulted to make sure the database supports the requested type. 
If not, an appropriate type is selected @@ -566,7 +567,7 @@ By default, Hibernate maps values of `Float` to the `FLOAT`, `REAL` or ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/FloatMappingTests.java[tags=basic-float-example-implicit] +include::{example-dir-basic-mapping}/basic/FloatMappingTests.java[tags=basic-float-example-implicit] ---- ==== @@ -590,7 +591,7 @@ By default, Hibernate maps values of `BigDecimal` to the `NUMERIC` JDBC type. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/BigDecimalMappingTests.java[tags=basic-bigdecimal-example-implicit] +include::{example-dir-basic-mapping}/basic/BigDecimalMappingTests.java[tags=basic-bigdecimal-example-implicit] ---- ==== @@ -608,7 +609,7 @@ By default, Hibernate maps `Character` to the `CHAR` JDBC type. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/CharacterMappingTests.java[tags=basic-character-example-implicit] +include::{example-dir-basic-mapping}/basic/CharacterMappingTests.java[tags=basic-character-example-implicit] ---- ==== @@ -626,7 +627,7 @@ By default, Hibernate maps `String` to the `VARCHAR` JDBC type. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/StringMappingTests.java[tags=basic-string-example] +include::{example-dir-basic-mapping}/basic/StringMappingTests.java[tags=basic-string-example] ---- ==== @@ -673,7 +674,7 @@ nationalized data. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/StringNationalizedMappingTests.java[tags=basic-nstring-example] +include::{example-dir-basic-mapping}/basic/StringNationalizedMappingTests.java[tags=basic-nstring-example] ---- ==== @@ -687,14 +688,18 @@ See <> for details on mapping strings using nationalized cha [[basic-chararray]] ==== Character arrays -By default, Hibernate maps `Character[]` and `char[]` to the `VARCHAR` JDBC type. +By default, Hibernate maps `char[]` to the `VARCHAR` JDBC type. +Since `Character[]` can contain null elements, it is mapped as <> instead. 
+Prior to Hibernate 6.2, `Character[]` also mapped to `VARCHAR`, yet disallowed `null` elements. +To continue mapping `Character[]` to the `VARCHAR` JDBC type, or for LOBs mapping to the `CLOB` JDBC type, +it is necessary to annotate the persistent attribute with `@JavaType( CharacterArrayJavaType.class )`. [[basic-string-example-implicit]] .Mapping Character ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/CharacterArrayMappingTests.java[tags=basic-chararray-example] +include::{example-dir-basic-mapping}/basic/CharacterArrayMappingTests.java[tags=basic-chararray-example] ---- ==== @@ -708,7 +713,7 @@ nationalized data. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/CharacterArrayNationalizedMappingTests.java[tags=basic-nchararray-example] +include::{example-dir-basic-mapping}/basic/CharacterArrayNationalizedMappingTests.java[tags=basic-nchararray-example] ---- ==== @@ -747,7 +752,7 @@ Let's first map this using the `@Lob` Jakarta Persistence annotation and the `ja ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ClobTest.java[tags=basic-clob-example] +include::{example-dir-basic-mapping}/basic/ClobTest.java[tags=basic-clob-example] ---- ==== @@ -758,7 +763,7 @@ To persist such an entity, you have to create a `Clob` using the `ClobProxy` Hib ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ClobTest.java[tags=basic-clob-persist-example] +include::{example-dir-basic-mapping}/basic/ClobTest.java[tags=basic-clob-persist-example] ---- ==== @@ -769,7 +774,7 @@ To retrieve the `Clob` content, you need to transform the underlying `java.io.Re ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ClobTest.java[tags=basic-clob-find-example] +include::{example-dir-basic-mapping}/basic/ClobTest.java[tags=basic-clob-find-example] ---- ==== @@ -780,7 +785,7 @@ We could also map the CLOB in a materialized form. 
This way, we can either use a ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ClobStringTest.java[tags=basic-clob-string-example] +include::{example-dir-basic-mapping}/basic/ClobStringTest.java[tags=basic-clob-string-example] ---- ==== @@ -791,7 +796,7 @@ We might even want the materialized data as a char array. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ClobCharArrayTest.java[tags=basic-clob-char-array-example] +include::{example-dir-basic-mapping}/basic/ClobCharArrayTest.java[tags=basic-clob-char-array-example] ---- ==== @@ -813,7 +818,7 @@ Hibernate can map the `NCLOB` to a `java.sql.NClob` ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/NClobTest.java[tags=basic-nclob-example] +include::{example-dir-basic-mapping}/basic/NClobTest.java[tags=basic-nclob-example] ---- ==== @@ -824,7 +829,7 @@ To persist such an entity, you have to create an `NClob` using the `NClobProxy` ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/NClobTest.java[tags=basic-nclob-persist-example] +include::{example-dir-basic-mapping}/basic/NClobTest.java[tags=basic-nclob-persist-example] ---- ==== @@ -835,7 +840,7 @@ To retrieve the `NClob` content, you need to transform the underlying `java.io.R ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/NClobTest.java[tags=basic-nclob-find-example] +include::{example-dir-basic-mapping}/basic/NClobTest.java[tags=basic-nclob-find-example] ---- ==== @@ -846,7 +851,7 @@ We could also map the `NCLOB` in a materialized form. This way, we can either us ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/NClobStringTest.java[tags=basic-nclob-string-example] +include::{example-dir-basic-mapping}/basic/NClobStringTest.java[tags=basic-nclob-string-example] ---- ==== @@ -857,7 +862,7 @@ We might even want the materialized data as a char array. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/NClobCharArrayTest.java[tags=basic-nclob-char-array-example] +include::{example-dir-basic-mapping}/basic/NClobCharArrayTest.java[tags=basic-nclob-char-array-example] ---- ==== @@ -870,15 +875,18 @@ include::{sourcedir}/basic/NClobCharArrayTest.java[tags=basic-nclob-char-array-e [[basic-bytearray]] ==== Byte array -By default, Hibernate maps values of type `byte[]` and `Byte[]` to the JDBC type -`VARBINARY`. +By default, Hibernate maps `byte[]` to the `VARBINARY` JDBC type. +Since `Byte[]` can contain null elements, it is mapped as <> instead. +Prior to Hibernate 6.2, `Byte[]` also mapped to `VARBINARY`, yet disallowed `null` elements. +To continue mapping `Byte[]` to the `VARBINARY` JDBC type, or for LOBs mapping to the `BLOB` JDBC type, +it is necessary to annotate the persistent attribute with `@JavaType( ByteArrayJavaType.class )`. [[basic-bytearray-example]] .Mapping arrays of bytes ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ByteArrayMappingTests.java[tags=basic-bytearray-example] +include::{example-dir-basic-mapping}/basic/ByteArrayMappingTests.java[tags=basic-bytearray-example] ---- ==== @@ -936,7 +944,7 @@ Let's first map this using the JDBC `java.sql.Blob` type. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/BlobTest.java[tags=basic-blob-example] +include::{example-dir-basic-mapping}/basic/BlobTest.java[tags=basic-blob-example] ---- ==== @@ -947,7 +955,7 @@ To persist such an entity, you have to create a `Blob` using the `BlobProxy` Hib ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/BlobTest.java[tags=basic-blob-persist-example] +include::{example-dir-basic-mapping}/basic/BlobTest.java[tags=basic-blob-persist-example] ---- ==== @@ -958,7 +966,7 @@ To retrieve the `Blob` content, you need to transform the underlying `java.io.In ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/BlobTest.java[tags=basic-blob-find-example] +include::{example-dir-basic-mapping}/basic/BlobTest.java[tags=basic-blob-find-example] ---- ==== @@ -969,7 +977,7 @@ We could also map the BLOB in a materialized form (e.g. `byte[]`). ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/BlobByteArrayTest.java[tags=basic-blob-byte-array-example] +include::{example-dir-basic-mapping}/basic/BlobByteArrayTest.java[tags=basic-blob-byte-array-example] ---- ==== @@ -982,14 +990,14 @@ include::{sourcedir}/basic/BlobByteArrayTest.java[tags=basic-blob-byte-array-exa By default, Hibernate maps `Duration` to the `NUMERIC` SQL type. 
-TIP: It's possible to map `Duration` to the `INTERVAL_SECOND` SQL type using `@JdbcType(INTERVAL_SECOND)` or by setting `hibernate.type.preferred_duration_jdbc_type=INTERVAL_SECOND` +TIP: It's possible to map `Duration` to the `INTERVAL_SECOND` SQL type using `@JdbcTypeCode(INTERVAL_SECOND)` or by setting `hibernate.type.preferred_duration_jdbc_type=INTERVAL_SECOND` [[basic-duration-example]] .Mapping Duration ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/DurationMappingTests.java[tags=basic-duration-example] +include::{example-dir-basic-mapping}/basic/DurationMappingTests.java[tags=basic-duration-example] ---- ==== @@ -1007,7 +1015,7 @@ include::{sourcedir}/basic/DurationMappingTests.java[tags=basic-duration-example ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/InstantMappingTests.java[tags=basic-instant-example] +include::{example-dir-basic-mapping}/basic/InstantMappingTests.java[tags=basic-instant-example] ---- ==== @@ -1028,7 +1036,7 @@ See <> for basics of temporal mapping ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/LocalDateMappingTests.java[tags=basic-localDate-example] +include::{example-dir-basic-mapping}/basic/LocalDateMappingTests.java[tags=basic-localDate-example] ---- ==== @@ -1049,7 +1057,7 @@ See <> for basics of temporal mapping ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/LocalDateTimeMappingTests.java[tags=basic-localDateTime-example] +include::{example-dir-basic-mapping}/basic/LocalDateTimeMappingTests.java[tags=basic-localDateTime-example] ---- ==== @@ -1070,7 +1078,7 @@ See <> for basics of temporal mapping ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/LocalTimeMappingTests.java[tags=basic-localTime-example] +include::{example-dir-basic-mapping}/basic/LocalTimeMappingTests.java[tags=basic-localTime-example] ---- ==== @@ -1093,7 +1101,7 @@ depending on the database. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/OffsetDateTimeMappingTests.java[tags=basic-OffsetDateTime-example] +include::{example-dir-basic-mapping}/basic/OffsetDateTimeMappingTests.java[tags=basic-OffsetDateTime-example] ---- ==== @@ -1119,7 +1127,7 @@ depending on the database. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/OffsetTimeMappingTests.java[tags=basic-offsetTime-example] +include::{example-dir-basic-mapping}/basic/OffsetTimeMappingTests.java[tags=basic-offsetTime-example] ---- ==== @@ -1141,7 +1149,7 @@ See <> for basics of time-zone handling ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/TimeZoneMappingTests.java[tags=basic-timeZone-example] +include::{example-dir-basic-mapping}/basic/TimeZoneMappingTests.java[tags=basic-timeZone-example] ---- ==== @@ -1163,7 +1171,7 @@ depending on the database. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ZonedDateTimeMappingTests.java[tags=basic-ZonedDateTime-example] +include::{example-dir-basic-mapping}/basic/ZonedDateTimeMappingTests.java[tags=basic-ZonedDateTime-example] ---- ==== @@ -1187,7 +1195,7 @@ See <> for basics of time-zone handling ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ZoneOffsetMappingTests.java[tags=basic-ZoneOffset-example] +include::{example-dir-basic-mapping}/basic/ZoneOffsetMappingTests.java[tags=basic-ZoneOffset-example] ---- ==== @@ -1257,7 +1265,7 @@ Hibernate maps `Class` references to `VARCHAR` JDBC type ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ClassMappingTests.java[tags=basic-Class-example] +include::{example-dir-basic-mapping}/basic/ClassMappingTests.java[tags=basic-Class-example] ---- ==== @@ -1272,7 +1280,7 @@ Hibernate maps `Currency` references to `VARCHAR` JDBC type ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/CurrencyMappingTests.java[tags=basic-Currency-example] 
+include::{example-dir-basic-mapping}/basic/CurrencyMappingTests.java[tags=basic-Currency-example] ---- ==== @@ -1287,7 +1295,7 @@ Hibernate maps `Locale` references to `VARCHAR` JDBC type ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/LocaleMappingTests.java[tags=basic-Locale-example] +include::{example-dir-basic-mapping}/basic/LocaleMappingTests.java[tags=basic-Locale-example] ---- ==== @@ -1338,7 +1346,7 @@ By default, Hibernate will map `InetAddress` to the `INET` SQL type and fallback ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/InetAddressMappingTests.java[tags=basic-inet-address-example] +include::{example-dir-basic-mapping}/basic/InetAddressMappingTests.java[tags=basic-inet-address-example] ---- ==== @@ -1355,7 +1363,7 @@ as can be read in the <> with two differences: +This functionality is similar to a derived-property <> with two differences: * The property is backed by one or more columns that are exported as part of automatic schema generation. * The property is read-write, not read-only. @@ -2641,7 +2649,7 @@ The `write` expression, if specified, must contain exactly one '?' placeholder f ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/ColumnTransformerTest.java[tags=mapping-column-read-and-write-composite-type-persistence-example] +include::{example-dir-basic-mapping}/basic/ColumnTransformerTest.java[tags=mapping-column-read-and-write-composite-type-persistence-example] ---- [source, SQL, indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/collections.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/collections.adoc index 325010be2bf6..7827c8c1f747 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/collections.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/collections.adoc @@ -1,16 +1,14 @@ [[collections]] === Collections -:rootProjectDir: ../../../../../../.. 
-:documentationProjectDir: {rootProjectDir}/documentation -:docTestsDir: ../../../../../test/java/org/hibernate/userguide/collections -:coreProjectDir: {rootProjectDir}/hibernate-core -:coreTestsDir: {coreProjectDir}/src/test/java -:coreCollectionTestsDir: {coreTestsDir}/org/hibernate/orm/test/mapping/collections -:classificationTestsDir: {coreCollectionTestsDir}/classification -:extrasdir: extras/collections -:docs-base: https://docs.jboss.org/hibernate/orm/6.0 +:majorMinorVersion: 6.2 +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:core-test-base: {core-project-dir}/src/test/java +:example-dir-collection: {core-test-base}/org/hibernate/orm/test/mapping/collections +:docs-base: https://docs.jboss.org/hibernate/orm/{majorMinorVersion} :javadoc-base: {docs-base}/javadoc :java-javadoc-base: https://docs.oracle.com/en/java/javase/11/docs/api/java.base +:extrasdir: extras/collections Hibernate supports mapping collections (`java.util.Collection` and `java.util.Map` subtypes) in a variety of ways. @@ -40,7 +38,7 @@ The semantics of a collection describes how to handle the collection, including * the collection subtype to use - `java.util.List`, `java.util.Set`, `java.util.SortedSet`, etc. * how to access elements of the collection -* how to create instances of the collection - both "raw" and "wrapper" forms. See <> +* how to create instances of the collection - both "raw" and "<>" forms. 
Hibernate supports the following semantics: @@ -80,7 +78,7 @@ interpretation as to which classification it fits in to, using the following che ==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/list/EntityWithList.java[tags=collections-list-ex] +include::{example-dir-collection}/classification/list/EntityWithList.java[tags=collections-list-ex] ---- ==== @@ -108,7 +106,7 @@ The default column name that stores the index is derived from the attribute name ==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/list/EntityWithOrderColumnList.java[tags=collections-list-ordercolumn-ex] +include::{example-dir-collection}/classification/list/EntityWithOrderColumnList.java[tags=collections-list-ordercolumn-ex] ---- ==== @@ -125,7 +123,7 @@ cases using its `@ListIndexBase` annotation. ==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/list/EntityWithIndexBasedList.java[tags=collections-list-indexbase-ex] +include::{example-dir-collection}/classification/list/EntityWithIndexBasedList.java[tags=collections-list-indexbase-ex] ---- ==== @@ -145,7 +143,7 @@ mapping sets according to the requirements of the `java.util.Set`. ==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/set/EntityWithSet.java[tags=collections-set-ex] +include::{example-dir-collection}/classification/set/EntityWithSet.java[tags=collections-set-ex] ---- ==== @@ -172,9 +170,9 @@ this implies that the element type is `Comparable`. E.g. 
==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/Name.java[tags=collections-name-ex] +include::{example-dir-collection}/classification/Name.java[tags=collections-name-ex] -include::{classificationTestsDir}/set/EntityWithNaturallySortedSet.java[tags=collections-sortedset-natural-ex] +include::{example-dir-collection}/classification/set/EntityWithNaturallySortedSet.java[tags=collections-sortedset-natural-ex] ---- ==== @@ -189,9 +187,9 @@ the `Names` as sorted by a `NameComparator`: ==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/NameComparator.java[tags=collections-name-comparator-ex] +include::{example-dir-collection}/classification/NameComparator.java[tags=collections-name-comparator-ex] -include::{classificationTestsDir}/set/EntityWithSortedSet.java[tags=collections-sortedset-comparator-ex] +include::{example-dir-collection}/classification/set/EntityWithSortedSet.java[tags=collections-sortedset-comparator-ex] ---- ==== @@ -232,7 +230,7 @@ are handled is largely undefined. ==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/bag/EntityWithBagAsCollection.java[tags=collections-bag-ex] +include::{example-dir-collection}/classification/bag/EntityWithBagAsCollection.java[tags=collections-bag-ex] ---- ==== @@ -244,7 +242,7 @@ lists as bags. 
First an explicit annotation ==== [source, JAVA, indent=0] ---- -include::{classificationTestsDir}/bag/EntityWithBagAsList.java[tags=collections-bag-list-ex] +include::{example-dir-collection}/classification/bag/EntityWithBagAsList.java[tags=collections-bag-list-ex] ---- ==== @@ -302,7 +300,7 @@ The embeddable used in the examples is a `PhoneNumber` - ==== [source,java] ---- -include::{coreCollectionTestsDir}/nature/elemental/Phone.java[tags=ex-collection-elemental-model,indent=0] +include::{example-dir-collection}/nature/elemental/Phone.java[tags=ex-collection-elemental-model,indent=0] ---- ==== @@ -314,7 +312,7 @@ First, a BAG mapping - ==== [source,java] ---- -include::{coreCollectionTestsDir}/nature/elemental/ElementalBagTest.java[tags=ex-collection-elemental-model,indent=0] +include::{example-dir-collection}/nature/elemental/ElementalBagTest.java[tags=ex-collection-elemental-model,indent=0] ---- ==== @@ -324,7 +322,7 @@ include::{coreCollectionTestsDir}/nature/elemental/ElementalBagTest.java[tags=ex ==== [source,java] ---- -include::{coreCollectionTestsDir}/nature/elemental/ElementalBagTest.java[tags=ex-collection-elemental-lifecycle,indent=0] +include::{example-dir-collection}/nature/elemental/ElementalBagTest.java[tags=ex-collection-elemental-lifecycle,indent=0] ---- [source,sql] @@ -382,7 +380,7 @@ cross between the ordered-ness of a `List` and the uniqueness of a `Set`. 
First ==== [source, JAVA, indent=0] ---- -include::{coreCollectionTestsDir}/semantics/TheEntityWithUniqueList.java[tags=ex-collections-custom-type-model] +include::{example-dir-collection}/semantics/TheEntityWithUniqueList.java[tags=ex-collections-custom-type-model] ---- ==== @@ -393,7 +391,7 @@ The mapping says to use the `UniqueListType` class for the mapping of the plural ==== [source, JAVA, indent=0] ---- -include::{coreCollectionTestsDir}/semantics/UniqueListType.java[tags=collections-custom-type-ex] +include::{example-dir-collection}/semantics/UniqueListType.java[tags=collections-custom-type-ex] ---- ==== @@ -404,7 +402,7 @@ Most custom `UserCollectionType` implementations will want their own `Persistent ==== [source, JAVA, indent=0] ---- -include::{coreCollectionTestsDir}/semantics/UniqueListWrapper.java[tags=collections-custom-semantics-ex] +include::{example-dir-collection}/semantics/UniqueListWrapper.java[tags=collections-custom-semantics-ex] ---- ==== @@ -424,7 +422,7 @@ plural attributes of a given classification, Hibernate also provides the ==== [source, JAVA, indent=0] ---- -include::{coreCollectionTestsDir}/semantics/TheEntityWithUniqueListRegistration.java[tags=ex-collections-custom-type-model] +include::{example-dir-collection}/semantics/TheEntityWithUniqueListRegistration.java[tags=ex-collections-custom-type-model] ---- ==== @@ -484,7 +482,7 @@ Behind the scenes, Hibernate requires an association table to manage the parent- ==== [source,java] ---- -include::{docTestsDir}/UnidirectionalBagTest.java[tags=collections-unidirectional-bag-example,indent=0] +include::{example-dir-collection}/UnidirectionalBagTest.java[tags=collections-unidirectional-bag-example,indent=0] ---- [source,sql] @@ -507,7 +505,7 @@ By marking the parent side with the `CascadeType.ALL` attribute, the unidirectio ==== [source,java] ---- -include::{docTestsDir}/UnidirectionalBagTest.java[tags=collections-unidirectional-bag-lifecycle-example,indent=0] 
+include::{example-dir-collection}/UnidirectionalBagTest.java[tags=collections-unidirectional-bag-lifecycle-example,indent=0] ---- [source,sql] @@ -536,7 +534,7 @@ The `@ManyToOne` side is the owning side of the bidirectional bag association, w ==== [source,java] ---- -include::{docTestsDir}/BidirectionalBagTest.java[tags=collections-bidirectional-bag-example,indent=0] +include::{example-dir-collection}/BidirectionalBagTest.java[tags=collections-bidirectional-bag-example,indent=0] ---- [source,sql] @@ -550,7 +548,7 @@ include::{extrasdir}/collections-bidirectional-bag-example.sql[] ==== [source,java] ---- -include::{docTestsDir}/BidirectionalBagTest.java[tags=collections-bidirectional-bag-lifecycle-example,indent=0] +include::{example-dir-collection}/BidirectionalBagTest.java[tags=collections-bidirectional-bag-lifecycle-example,indent=0] ---- [source,sql] @@ -564,7 +562,7 @@ include::{extrasdir}/collections-bidirectional-bag-lifecycle-example.sql[] ==== [source,java] ---- -include::{docTestsDir}/BidirectionalBagOrphanRemovalTest.java[tags=collections-bidirectional-bag-orphan-removal-example,indent=0] +include::{example-dir-collection}/BidirectionalBagOrphanRemovalTest.java[tags=collections-bidirectional-bag-orphan-removal-example,indent=0] ---- [source,sql] @@ -594,7 +592,7 @@ When using the `@OrderBy` annotation, the mapping looks as follows: ==== [source,java] ---- -include::{docTestsDir}/UnidirectionalOrderedByListTest.java[tags=collections-unidirectional-ordered-list-order-by-example,indent=0] +include::{example-dir-collection}/UnidirectionalOrderedByListTest.java[tags=collections-unidirectional-ordered-list-order-by-example,indent=0] ---- ==== @@ -626,7 +624,7 @@ Another ordering option is to use the `@OrderColumn` annotation: ==== [source,java] ---- -include::{docTestsDir}/UnidirectionalOrderColumnListTest.java[tags=collections-unidirectional-ordered-list-order-column-example,indent=0] 
+include::{example-dir-collection}/UnidirectionalOrderColumnListTest.java[tags=collections-unidirectional-ordered-list-order-column-example,indent=0] ---- [source,sql] @@ -659,7 +657,7 @@ The mapping is similar with the <> example, just ==== [source,java] ---- -include::{docTestsDir}/BidirectionalOrderByListTest.java[tags=collections-bidirectional-ordered-list-order-by-example,indent=0] +include::{example-dir-collection}/BidirectionalOrderByListTest.java[tags=collections-bidirectional-ordered-list-order-by-example,indent=0] ---- ==== @@ -672,7 +670,7 @@ When using the `@OrderColumn` annotation, the `order_id` column is going to be e ==== [source,java] ---- -include::{docTestsDir}/BidirectionalOrderColumnListTest.java[tags=collections-bidirectional-ordered-list-order-column-example,indent=0] +include::{example-dir-collection}/BidirectionalOrderColumnListTest.java[tags=collections-bidirectional-ordered-list-order-column-example,indent=0] ---- [source,sql] @@ -693,7 +691,7 @@ You can customize the ordinal of the underlying ordered list by using the https: ==== [source,java] ---- -include::{docTestsDir}/OrderColumnListIndexBaseTest.java[tags=collections-customizing-ordered-list-ordinal-mapping-example,indent=0] +include::{example-dir-collection}/OrderColumnListIndexBaseTest.java[tags=collections-customizing-ordered-list-ordinal-mapping-example,indent=0] ---- ==== @@ -704,7 +702,7 @@ When inserting two `Phone` records, Hibernate is going to start the List index f ==== [source,java] ---- -include::{docTestsDir}/OrderColumnListIndexBaseTest.java[tags=collections-customizing-ordered-list-ordinal-persist-example,indent=0] +include::{example-dir-collection}/OrderColumnListIndexBaseTest.java[tags=collections-customizing-ordered-list-ordinal-persist-example,indent=0] ---- [source,sql] @@ -729,7 +727,7 @@ by the number of characters of the `name` attribute. 
==== [source,java] ---- -include::{docTestsDir}/OrderedBySQLTest.java[tags=collections-customizing-ordered-by-sql-clause-mapping-example,indent=0] +include::{example-dir-collection}/OrderedBySQLTest.java[tags=collections-customizing-ordered-by-sql-clause-mapping-example,indent=0] ---- ==== @@ -740,7 +738,7 @@ When fetching the `articles` collection, Hibernate uses the ORDER BY SQL clause ==== [source,java] ---- -include::{docTestsDir}/OrderedBySQLTest.java[tags=collections-customizing-ordered-by-sql-clause-fetching-example,indent=0] +include::{example-dir-collection}/OrderedBySQLTest.java[tags=collections-customizing-ordered-by-sql-clause-fetching-example,indent=0] ---- [source,sql] @@ -764,7 +762,7 @@ The unidirectional set uses a link table to hold the parent-child associations a ==== [source,java] ---- -include::{docTestsDir}/UnidirectionalSetTest.java[tags=collections-unidirectional-set-example,indent=0] +include::{example-dir-collection}/UnidirectionalSetTest.java[tags=collections-unidirectional-set-example,indent=0] ---- ==== @@ -789,7 +787,7 @@ The lifecycle is just like with bidirectional bags except for the duplicates whi ==== [source,java] ---- -include::{docTestsDir}/BidirectionalSetTest.java[tags=collections-bidirectional-set-example,indent=0] +include::{example-dir-collection}/BidirectionalSetTest.java[tags=collections-bidirectional-set-example,indent=0] ---- ==== @@ -809,7 +807,7 @@ A `SortedSet` that relies on the natural sorting order given by the child elemen ==== [source,java] ---- -include::{docTestsDir}/UnidirectionalSortedSetTest.java[tags=collections-unidirectional-sorted-set-natural-comparator-example,indent=0] +include::{example-dir-collection}/UnidirectionalSortedSetTest.java[tags=collections-unidirectional-sorted-set-natural-comparator-example,indent=0] ---- ==== @@ -822,7 +820,7 @@ To provide a custom sorting logic, Hibernate also provides a `@SortComparator` a ==== [source,java] ---- 
-include::{docTestsDir}/UnidirectionalComparatorSortedSetTest.java[tags=collections-unidirectional-sorted-set-custom-comparator-example,indent=0] +include::{example-dir-collection}/UnidirectionalComparatorSortedSetTest.java[tags=collections-unidirectional-sorted-set-custom-comparator-example,indent=0] ---- ==== @@ -836,9 +834,9 @@ The `@SortNatural` and `@SortComparator` work the same for bidirectional sorted ==== [source,java] ---- -include::{docTestsDir}/BidirectionalSortedSetTest.java[tags=collections-bidirectional-sorted-set-example,indent=0] +include::{example-dir-collection}/BidirectionalSortedSetTest.java[tags=collections-bidirectional-sorted-set-example,indent=0] -include::{docTestsDir}/UnidirectionalComparatorSortedSetTest.java[lines=75..77,indent=0] +include::{example-dir-collection}/UnidirectionalComparatorSortedSetTest.java[lines=75..77,indent=0] ---- ==== @@ -871,7 +869,7 @@ A map of value type must use the `@ElementCollection` annotation, just like valu ==== [source,java] ---- -include::{docTestsDir}/ElementCollectionMapTest.java[tags=collections-map-value-type-entity-key-example,indent=0] +include::{example-dir-collection}/ElementCollectionMapTest.java[tags=collections-map-value-type-entity-key-example,indent=0] ---- [source,sql] @@ -887,7 +885,7 @@ Adding entries to the map generates the following SQL statements: ==== [source,java] ---- -include::{docTestsDir}/ElementCollectionMapTest.java[tags=collections-map-value-type-entity-key-add-example,indent=0] +include::{example-dir-collection}/ElementCollectionMapTest.java[tags=collections-map-value-type-entity-key-add-example,indent=0] ---- [source,sql] @@ -925,7 +923,7 @@ Since we want to map all the calls by their associated `java.util.Date`, not by ==== [source,java] ---- -include::{docTestsDir}/MapKeyTypeTest.java[tags=collections-map-custom-key-type-mapping-example,indent=0] +include::{example-dir-collection}/MapKeyTypeTest.java[tags=collections-map-custom-key-type-mapping-example,indent=0] ---- 
==== @@ -940,7 +938,7 @@ Considering you have the following `PhoneNumber` interface with an implementatio ==== [source,java] ---- -include::{docTestsDir}/MapKeyClassTest.java[tags=collections-map-key-class-type-mapping-example,indent=0] +include::{example-dir-collection}/MapKeyClassTest.java[tags=collections-map-key-class-type-mapping-example,indent=0] ---- ==== @@ -952,7 +950,7 @@ If you want to use the `PhoneNumber` interface as a `java.util.Map` key, then yo ==== [source,java] ---- -include::{docTestsDir}/MapKeyClassTest.java[tags=collections-map-key-class-mapping-example,indent=0] +include::{example-dir-collection}/MapKeyClassTest.java[tags=collections-map-key-class-mapping-example,indent=0] ---- [source,sql] @@ -969,7 +967,7 @@ Hibernate generates the following SQL statements: ==== [source,java] ---- -include::{docTestsDir}/MapKeyClassTest.java[tags=collections-map-key-class-persist-example,indent=0] +include::{example-dir-collection}/MapKeyClassTest.java[tags=collections-map-key-class-persist-example,indent=0] ---- [source,sql] @@ -986,7 +984,7 @@ Hibernate generates the following SQL statements: ==== [source,java] ---- -include::{docTestsDir}/MapKeyClassTest.java[tags=collections-map-key-class-fetch-example,indent=0] +include::{example-dir-collection}/MapKeyClassTest.java[tags=collections-map-key-class-fetch-example,indent=0] ---- [source,sql] @@ -1013,7 +1011,7 @@ The `@MapKey` annotation is used to define the entity attribute used as a key of ==== [source,java] ---- -include::{docTestsDir}/UnidirectionalMapTest.java[tags=collections-map-unidirectional-example,indent=0] +include::{example-dir-collection}/UnidirectionalMapTest.java[tags=collections-map-unidirectional-example,indent=0] ---- [source,sql] @@ -1034,7 +1032,7 @@ In the following example, you can see that `@MapKeyEnumerated` was used so that ==== [source,java] ---- -include::{docTestsDir}/BidirectionalMapTest.java[tags=collections-map-bidirectional-example,indent=0] 
+include::{example-dir-collection}/BidirectionalMapTest.java[tags=collections-map-bidirectional-example,indent=0] ---- [source,sql] @@ -1049,40 +1047,34 @@ include::{extrasdir}/collections-map-bidirectional-example.sql[] When discussing arrays, it is important to understand the distinction between SQL array types and Java arrays that are mapped as part of the application's domain model. Not all databases implement the SQL-99 ARRAY type and, for this reason, -Hibernate doesn't support native database array types. +the SQL type used by Hibernate for arrays varies depending on the database support. + +NOTE: It is impossible for Hibernate to offer lazy-loading for arrays of entities and, for this reason, +it is strongly recommended to map a "collection" of entities using a List or Set rather than an array. -Hibernate does support the mapping of arrays in the Java domain model - conceptually the same as mapping a List. -However, it is important to realize that it is impossible for Hibernate to offer lazy-loading for arrays of entities and, for this reason, -it is strongly recommended to map a "collection" of entities using a List rather than an array. +[[collections-array-as-basic]] +==== [[collections-array-binary]] Arrays as basic value type -[[collections-array-binary]] -==== Arrays as binary +By default, Hibernate will choose a type for the array based on `Dialect.getPreferredSqlTypeCodeForArray()`. +Prior to Hibernate 6.1, the default was to always use the BINARY type, as supported by the current `Dialect`, +but now, Hibernate will leverage the native array data types if possible. -By default, Hibernate will choose a BINARY type, as supported by the current `Dialect`. +[[collections-array-binary-example]] To force the BINARY type, the persistent attribute has to be annotated with `@JdbcTypeCode(SqlTypes.VARBINARY)`. 
-[[collections-array-binary-example]] -.Arrays stored as binary +[[collections-array-as-basic-example]] +.Arrays stored as SQL array ==== [source,java] ---- -include::{docTestsDir}/ArrayTest.java[tags=collections-array-binary-example,indent=0] +include::{example-dir-collection}/ArrayTest.java[tags=collections-array-as-basic-example,indent=0] ---- [source,sql] ---- -include::{extrasdir}/collections-array-binary-example.sql[] +include::{extrasdir}/collections-array-as-basic-example.sql[] ---- ==== -[NOTE] -==== -If you want to map arrays such as `String[]` or `int[]` to database-specific array types like PostgreSQL `integer[]` or `text[]`, -you need to write a custom Hibernate Type. - -Check out https://vladmihalcea.com/how-to-map-java-and-sql-arrays-with-jpa-and-hibernate/[this article] for an example of how to write -such a custom Hibernate Type. -==== - [[collections-as-basic]] ==== Collections as basic value type @@ -1090,19 +1082,33 @@ Notice how all the previous examples explicitly mark the collection attribute as `@OneToMany` or `@ManyToMany`. Attributes of collection or array type without any of those annotations are considered basic types and by -default mapped to the database as VARBINARY. +default mapped like basic arrays as depicted in the <>. + +[[collections-as-basic-example]] +.Collections stored as SQL array +==== +[source,java] +---- +include::{example-dir-collection}/CollectionTest.java[tags=collections-as-basic-example,indent=0] +---- + +[source,sql] +---- +include::{extrasdir}/collections-array-as-basic-example.sql[] +---- +==== -Such mappings are not normal and beyond the scope of this documentation. The best way to map such a situation -is using an <>. +Prior to Hibernate 6.1, it was common to use an <> to map the elements +into e.g. a comma separated list which is still a viable option. Just note that it is not required anymore. 
[[ex-collections-comma-delimited-list]] .Comma delimited collection ==== [source,java] ---- -include::{coreCollectionTestsDir}/asbasic/CommaDelimitedStringsConverter.java[tags=ex-csv-converter,indent=0] +include::{example-dir-collection}/asbasic/CommaDelimitedStringsConverter.java[tags=ex-csv-converter,indent=0] -include::{coreCollectionTestsDir}/asbasic/CommaDelimitedStringsConverterTests.java[tags=ex-csv-converter-model,indent=0] +include::{example-dir-collection}/asbasic/CommaDelimitedStringsConverterTests.java[tags=ex-csv-converter-model,indent=0] ---- ==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/customizing.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/customizing.adoc index fd55cba3160b..438815a0659a 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/customizing.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/customizing.adoc @@ -1,8 +1,8 @@ [[domain-customizing]] === Customizing the domain model -:rootProjectDir: ../../../../../../.. -:coreProjectDir: {rootProjectDir}/hibernate-core -:attributeBinderTestDir: {coreProjectDir}/src/test/java/org/hibernate/orm/test/mapping/attributebinder +:root-project-dir: ../../../../../../.. 
+:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-attributebinder: {core-project-dir}/src/test/java/org/hibernate/orm/test/mapping/attributebinder :extrasdir: extras For cases where Hibernate does not provide a built-in way to configure the domain @@ -18,9 +18,9 @@ An example: ==== [source,java] ---- -include::{attributeBinderTestDir}/YesNo.java[tag=attribute-binder-example, indent=0] +include::{example-dir-attributebinder}/YesNo.java[tag=attribute-binder-example, indent=0] -include::{attributeBinderTestDir}/YesNoBinder.java[tag=attribute-binder-example, indent=0] +include::{example-dir-attributebinder}/YesNoBinder.java[tag=attribute-binder-example, indent=0] ---- ==== @@ -32,4 +32,4 @@ it has the `@AttributeBinderType` meta-annotation and knows how to apply that th Notice also that `@AttributeBinderType` provides a type-safe way to perform configuration because the `AttributeBinder` (`YesNoBinder`) is handed the custom annotation (`@YesNo`) to grab its configured attributes. `@YesNo` does not provide any attributes, but it easily could. Whatever `YesNoBinder` -supports. \ No newline at end of file +supports. diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/dynamic_model.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/dynamic_model.adoc index 26e914897afb..9a6b7d07a587 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/dynamic_model.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/dynamic_model.adoc @@ -1,7 +1,9 @@ [[dynamic-model]] === Dynamic Model -:sourcedir: ../../../../../test/java/org/hibernate/userguide/mapping/dynamic -:mappingdir: ../../../../../test/resources/org/hibernate/userguide/mapping/dynamic +:root-project-dir: ../../../../../../.. 
+:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-dynamic: {core-project-dir}/src/test/java/org/hibernate/orm/test/mapping/dynamic +:example-dir-resources: {core-project-dir}/src/test/resources/org/hibernate/orm/test/mapping/dynamic :extrasdir: extras [IMPORTANT] @@ -26,7 +28,7 @@ Entity modes can now be mixed within a domain model; a dynamic entity might refe ==== [source,xml] ---- -include::{mappingdir}/Book.hbm.xml[tag=mapping-model-dynamic-example, indent=0] +include::{example-dir-resources}/Book.hbm.xml[tag=mapping-model-dynamic-example, indent=0] ---- ==== @@ -37,7 +39,7 @@ After you defined your entity mapping, you need to instruct Hibernate to use the ==== [source,java] ---- -include::{sourcedir}/DynamicEntityTest.java[tag=mapping-model-dynamic-setting-example, indent=0] +include::{example-dir-dynamic}/DynamicEntityTest.java[tag=mapping-model-dynamic-setting-example, indent=0] ---- ==== @@ -49,7 +51,7 @@ Hibernate is going to generate the following SQL statement: ==== [source,java] ---- -include::{sourcedir}/DynamicEntityTest.java[tag=mapping-model-dynamic-example, indent=0] +include::{example-dir-dynamic}/DynamicEntityTest.java[tag=mapping-model-dynamic-example, indent=0] ---- [source,sql] @@ -66,4 +68,4 @@ However, as a result of the Hibernate mapping, the database schema can easily be It is also interesting to note that dynamic models are great for certain integration use cases as well. Envers, for example, makes extensive use of dynamic models to represent the historical data. 
-==== \ No newline at end of file +==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/embeddables.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/embeddables.adoc index 098dc1d649d0..1e2fcd9d96dd 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/embeddables.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/embeddables.adoc @@ -1,11 +1,11 @@ [[embeddables]] === Embeddable values -:rootProjectDir: ../../../../../../.. -:sourcedir: ../../../../../test/java/org/hibernate/userguide/mapping/embeddable -:coreProjectDir: {rootProjectDir}/hibernate-core -:coreTestSrcDir: {rootProjectDir}/hibernate-core/src/test/java -:instantiatorTestDir: {coreTestSrcDir}/org/hibernate/orm/test/mapping/embeddable/strategy/instantiator -:usertypeTestDir: {coreTestSrcDir}/org/hibernate/orm/test/mapping/embeddable/strategy/usertype +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:core-test-base: {root-project-dir}/hibernate-core/src/test/java +:example-dir-emeddable: {core-test-base}/org/hibernate/orm/test/mapping/embeddable +:example-dir-embeddableinstantiator: {core-test-base}/org/hibernate/orm/test/mapping/embeddable/strategy/instantiator +:example-dir-compositeusertype: {core-test-base}/org/hibernate/orm/test/mapping/embeddable/strategy/usertype :extrasdir: extras Historically Hibernate called these components. 
@@ -28,7 +28,7 @@ Throughout this chapter and thereafter, for brevity sake, embeddable types may a ==== [source,java] ---- -include::{sourcedir}/NestedEmbeddableTest.java[tag=embeddable-type-mapping-example, indent=0] +include::{example-dir-emeddable}/NestedEmbeddableTest.java[tag=embeddable-type-mapping-example, indent=0] ---- ==== @@ -45,7 +45,7 @@ Most often, embeddable types are used to group multiple basic type mappings and ==== [source,java] ---- -include::{sourcedir}/SimpleEmbeddableTest.java[tag=embeddable-type-mapping-example, indent=0] +include::{example-dir-emeddable}/SimpleEmbeddableTest.java[tag=embeddable-type-mapping-example, indent=0] ---- [source,sql] @@ -75,7 +75,7 @@ In fact, that table could also be mapped by the following entity type instead. ==== [source,java] ---- -include::{sourcedir}/SimpleEmbeddableEquivalentTest.java[tag=embeddable-type-mapping-example, indent=0] +include::{example-dir-emeddable}/SimpleEmbeddableEquivalentTest.java[tag=embeddable-type-mapping-example, indent=0] ---- ==== @@ -106,7 +106,7 @@ which defines a `@ManyToOne` association with the `Country` entity: ==== [source,java] ---- -include::{sourcedir}/EmbeddableOverrideTest.java[tag=embeddable-type-association-mapping-example, indent=0] +include::{example-dir-emeddable}/EmbeddableOverrideTest.java[tag=embeddable-type-association-mapping-example, indent=0] ---- [source,sql] @@ -125,7 +125,7 @@ Therefore, the `Book` entity needs to override the embeddable type mappings for ==== [source,java] ---- -include::{sourcedir}/EmbeddableOverrideTest.java[tag=embeddable-type-override-mapping-example, indent=0] +include::{example-dir-emeddable}/EmbeddableOverrideTest.java[tag=embeddable-type-override-mapping-example, indent=0] ---- [source,sql] @@ -178,7 +178,7 @@ However, for simple embeddable types, there is no such construct and so you need ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/TargetTest.java[tags=embeddable-Target-example] 
+include::{example-dir-emeddable}/TargetTest.java[tags=embeddable-Target-example] ---- ==== @@ -193,7 +193,7 @@ Assuming we have persisted the following `City` entity: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/TargetTest.java[tags=embeddable-Target-persist-example] +include::{example-dir-emeddable}/TargetTest.java[tags=embeddable-Target-persist-example] ---- ==== @@ -204,7 +204,7 @@ When fetching the `City` entity, the `coordinates` property is mapped by the `@T ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/TargetTest.java[tags=embeddable-Target-fetching-example] +include::{example-dir-emeddable}/TargetTest.java[tags=embeddable-Target-fetching-example] ---- [source, SQL, indent=0] @@ -225,7 +225,7 @@ The Hibernate-specific `@Parent` annotation allows you to reference the owner en ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ParentTest.java[tags=embeddable-Parent-example] +include::{example-dir-emeddable}/ParentTest.java[tags=embeddable-Parent-example] ---- ==== @@ -236,7 +236,7 @@ Assuming we have persisted the following `City` entity: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ParentTest.java[tags=embeddable-Parent-persist-example] +include::{example-dir-emeddable}/ParentTest.java[tags=embeddable-Parent-persist-example] ---- ==== @@ -247,7 +247,7 @@ When fetching the `City` entity, the `city` property of the embeddable type acts ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ParentTest.java[tags=embeddable-Parent-fetching-example] +include::{example-dir-emeddable}/ParentTest.java[tags=embeddable-Parent-fetching-example] ---- ==== @@ -270,7 +270,7 @@ embeddable: ==== [source, JAVA, indent=0] ---- -include::{instantiatorTestDir}/embedded/Name.java[tags=embeddable-instantiator-embeddable] +include::{example-dir-embeddableinstantiator}/embedded/Name.java[tags=embeddable-instantiator-embeddable] ---- ==== @@ -282,7 +282,7 @@ conventions, in terms of constructor, a custom strategy for instantiation is nee 
==== [source, JAVA, indent=0] ---- -include::{instantiatorTestDir}/embedded/NameInstantiator.java[tags=embeddable-instantiator-impl] +include::{example-dir-embeddableinstantiator}/embedded/NameInstantiator.java[tags=embeddable-instantiator-impl] ---- ==== @@ -294,7 +294,7 @@ annotation can be used on the embedded attribute: ==== [source, JAVA, indent=0] ---- -include::{instantiatorTestDir}/embedded/Person.java[tags=embeddable-instantiator-property] +include::{example-dir-embeddableinstantiator}/embedded/Person.java[tags=embeddable-instantiator-property] ---- ==== @@ -305,9 +305,9 @@ include::{instantiatorTestDir}/embedded/Person.java[tags=embeddable-instantiator ==== [source, JAVA, indent=0] ---- -include::{instantiatorTestDir}/embeddable/Name.java[tags=embeddable-instantiator-class] +include::{example-dir-embeddableinstantiator}/embeddable/Name.java[tags=embeddable-instantiator-class] -include::{instantiatorTestDir}/embeddable/Person.java[tags=embeddable-instantiator-class] +include::{example-dir-embeddableinstantiator}/embeddable/Person.java[tags=embeddable-instantiator-class] ---- ==== @@ -321,7 +321,7 @@ on the <>. ==== [source, JAVA, indent=0] ---- -include::{instantiatorTestDir}/registered/Person.java[tags=embeddable-instantiator-registration] +include::{example-dir-embeddableinstantiator}/registered/Person.java[tags=embeddable-instantiator-registration] ---- ==== @@ -346,7 +346,7 @@ For example, consider the following custom type: ==== [source, JAVA, indent=0] ---- -include::{usertypeTestDir}/embedded/Name.java[tags=embeddable-usertype-domain] +include::{example-dir-compositeusertype}/embedded/Name.java[tags=embeddable-usertype-domain] ---- ==== @@ -358,7 +358,7 @@ conventions, a custom user type for instantiation and state access is needed. 
==== [source, JAVA, indent=0] ---- -include::{usertypeTestDir}/embedded/NameCompositeUserType.java[tags=embeddable-usertype-impl] +include::{example-dir-compositeusertype}/embedded/NameCompositeUserType.java[tags=embeddable-usertype-impl] ---- ==== @@ -381,7 +381,7 @@ annotation can be used on the embedded and element collection attributes: ==== [source, JAVA, indent=0] ---- -include::{usertypeTestDir}/embedded/Person.java[tags=embeddable-usertype-property] +include::{example-dir-compositeusertype}/embedded/Person.java[tags=embeddable-usertype-property] ---- ==== @@ -393,7 +393,7 @@ when the application developer wants to apply the composite user type for all do ==== [source, JAVA, indent=0] ---- -include::{usertypeTestDir}/registered/Person.java[tags=embeddable-usertype-registration] +include::{example-dir-compositeusertype}/registered/Person.java[tags=embeddable-usertype-registration] ---- ==== @@ -417,7 +417,7 @@ However, for the purposes of this discussion, Hibernate has the capability to in ==== [source,java] ---- -include::{sourcedir}/EmbeddableImplicitOverrideTest.java[tag=embeddable-multiple-namingstrategy-entity-mapping, indent=0] +include::{example-dir-emeddable}/EmbeddableImplicitOverrideTest.java[tag=embeddable-multiple-namingstrategy-entity-mapping, indent=0] ---- ==== @@ -428,7 +428,7 @@ To make it work, you need to use the `ImplicitNamingStrategyComponentPathImpl` n ==== [source,java] ---- -include::{sourcedir}/EmbeddableImplicitOverrideTest.java[tag=embeddable-multiple-ImplicitNamingStrategyComponentPathImpl, indent=0] +include::{example-dir-emeddable}/EmbeddableImplicitOverrideTest.java[tag=embeddable-multiple-ImplicitNamingStrategyComponentPathImpl, indent=0] ---- ==== @@ -439,4 +439,174 @@ Now the "path" to attributes are used in the implicit column naming: include::{extrasdir}/embeddable/embeddable-multiple-namingstrategy-entity-mapping.sql[] ---- -You could even develop your own naming strategy to do other types of implicit naming strategies. 
\ No newline at end of file +You could even develop your own naming strategy to do other types of implicit naming strategies. + +[[embeddable-mapping-aggregate]] +==== Aggregate embeddable mapping + +An embeddable mapping is usually just a way to encapsulate columns of a table into a Java type, +but as of Hibernate 6.2, it is also possible to map embeddable types as SQL aggregate types. + +Currently, there are three possible SQL aggregate types which can be specified by annotating one of the following +annotations on a persistent attribute: + +* `@Struct` - maps to a named SQL object type +* `@JdbcTypeCode(SqlTypes.JSON)` - maps to the SQL type JSON +* `@JdbcTypeCode(SqlTypes.SQLXML)` - maps to the SQL type XML + +Any read or assignment (in an update statement) expression for an attribute of such an embeddable +will resolve to the proper SQL expression to access/update the attribute of the SQL type. + +Since object, JSON and XML types are not supported equally on all databases, beware that not every mapping will work on all databases. +The following table outlines the current support for the different aggregate types: + +|=== +|Database |Struct |JSON |XML + +|PostgreSQL +|Yes +|Yes +|No (not yet) + +|Oracle +|Yes +|Yes +|No (not yet) + +|DB2 +|Yes +|No (not yet) +|No (not yet) + +|SQL Server +|No (not yet) +|No (not yet) +|No (not yet) +|=== + +Also note that embeddable types that are used in aggregate mappings do not yet support all kinds of attribute mappings, most notably: + +* Association mappings (`@ManyToOne`, `@OneToOne`, `@OneToMany`, `@ManyToMany`, `@ElementCollection`) +* Basic array mappings + +===== `@Struct` aggregate embeddable mapping + +The `@Struct` annotation can be placed on either the persistent attribute, or the embeddable type, +and requires the specification of a name i.e. the name of the SQL object type that it maps to. 
+ +The following example mapping maps the `EmbeddableAggregate` type to the SQL object type `structType`: + +.Mapping embeddable as SQL object type on persistent attribute level +==== +[source,java] +---- +include::{example-dir-emeddable}/StructEmbeddableTest.java[tag=embeddable-struct-type-mapping-example, indent=0] +---- +==== + +The schema generation will by default emit DDL for that object type, which looks something along the lines of + +==== +[source,sql] +---- +create type structType as ( + ... +) +create table StructHolder ( + id bigint not null primary key, + aggregate structType +) +---- +==== + +The name and the nullability of the column can be refined through applying a `@Column` on the persistent attribute. + +One very important thing to note is that the order of columns in the DDL definition of a type must match the order that Hibernate expects. +By default, the order of columns is based on the alphabetical ordering of the embeddable type attribute names. + +Consider the following class: + +==== +[source,java] +---- +@Embeddable +@Struct(name = "myStruct") +public class MyStruct { + @Column(name = "b") + String attr1; + @Column(name = "a") + String attr2; +} +---- +==== + +The expected ordering of columns will be `(b,a)`, because the name `attr1` comes before `attr2` in alphabetical ordering. +This example aims at showing the importance of the persistent attribute name. + +Defining the embeddable type as a Java record instead of a class can force a particular ordering through the definition of the canonical constructor. + +==== +[source,java] +---- +@Embeddable +@Struct(name = "myStruct") +public record MyStruct ( + @Column(name = "a") + String attr2, + @Column(name = "b") + String attr1 +) {} +---- +==== + +In this particular example, the expected ordering of columns will be `(a,b)`, because the canonical constructor of the record +defines a specific ordering of persistent attributes, which Hibernate makes use of for `@Struct` mappings.
+ +It is not necessary to switch to Java records to configure the order though. +The `@Struct` annotation allows specifying the order through the `attributes` member, +an array of attribute names that the embeddable type declares, which defines the order in which columns appear in the SQL object type. + +The same ordering as with the Java record can be achieved this way: + +==== +[source,java] +---- +@Embeddable +@Struct(name = "myStruct", attributes = {"attr2", "attr1"}) +public class MyStruct { + @Column(name = "b") + String attr1; + @Column(name = "a") + String attr2; +} +---- +==== + +===== JSON/XML aggregate embeddable mapping + +The `@JdbcTypeCode` annotation for JSON and XML mappings can only be placed on the persistent attribute. + +The following example mapping maps the `EmbeddableAggregate` type to the JSON SQL type: + +.Mapping embeddable as JSON +==== +[source,java] +---- +include::{example-dir-emeddable}/JsonEmbeddableTest.java[tag=embeddable-json-type-mapping-example, indent=0] +---- +==== + +The schema generation will by default emit DDL that ensures the constraints of the embeddable type are respected, which looks something along the lines of + +==== +[source,sql] +---- +create table JsonHolder ( + id bigint not null primary key, + aggregate json, + check (json_value(aggregate, '$.attribute1') is not null) +) +---- +==== + +Again, the name and the nullability of the `aggregate` column can be refined through applying a `@Column` on the persistent attribute.
diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/entity.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/entity.adoc index 1481ae5227e1..6cd85c3e54c7 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/entity.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/entity.adoc @@ -1,9 +1,11 @@ [[entity]] === Entity types -:sourcedir-locking: ../../../../../test/java/org/hibernate/userguide/locking -:sourcedir-mapping: ../../../../../test/java/org/hibernate/userguide/mapping -:sourcedir-proxy: ../../../../../test/java/org/hibernate/userguide/proxy -:sourcedir-persister: ../../../../../test/java/org/hibernate/userguide/persister +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:core-test-base: {core-project-dir}/src/test/java/org/hibernate/orm/test +:example-dir-mapping: {core-test-base}/mapping +:example-dir-proxy: {core-test-base}/proxy +:example-dir-persister: {core-test-base}/persister :extrasdir: extras .Usage of the word _entity_ @@ -105,7 +107,7 @@ The placement of the `@Id` annotation marks the <>, <>, +<> and <>. Unfortunately, it +has slightly different impacts depending on where it is placed; see the linked sections for details. + + + + +[[mutability-entity]] ==== Entity immutability If a specific entity is immutable, it is good practice to mark it with the `@Immutable` annotation. 
@@ -13,13 +39,13 @@ If a specific entity is immutable, it is good practice to mark it with the `@Imm ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityImmutabilityTest.java[tags=entity-immutability-example] +include::{mutability-example-dir}/entity/EntityImmutabilityTest.java[tags=entity-immutability-example] ---- ==== Internally, Hibernate is going to perform several optimizations, such as: -- reducing memory footprint since there is no need to retain the dehydrated state for the dirty checking mechanism +- reducing memory footprint since there is no need to retain the loaded state for the dirty checking mechanism - speeding-up the Persistence Context flushing phase since immutable entities can skip the dirty checking process Considering the following entity is persisted in the database: @@ -28,7 +54,7 @@ Considering the following entity is persisted in the database: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityImmutabilityTest.java[tags=entity-immutability-persist-example] +include::{mutability-example-dir}/entity/EntityImmutabilityTest.java[tags=entity-immutability-persist-example] ---- ==== @@ -39,7 +65,7 @@ Hibernate will skip any modification, therefore no SQL `UPDATE` statement is exe ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityImmutabilityTest.java[tags=entity-immutability-update-example] +include::{mutability-example-dir}/entity/EntityImmutabilityTest.java[tags=entity-immutability-update-example] ---- [source, SQL, indent=0] @@ -48,28 +74,70 @@ include::{extrasdir}/entity-immutability-update-example.sql[] ---- ==== -==== Collection immutability -Just like entities, collections can also be marked with the `@Immutable` annotation. +`@Mutability` is not allowed on an entity. + + -Considering the following entity mappings: -.Immutable collection + +[[mutability-attribute]] +==== Attribute mutability + +The `@Immutable` annotation may also be used on attributes. 
The impact varies +slightly depending on the exact kind of attribute. + +`@Mutability` on an attribute applies the specified `MutabilityPlan` to the attribute for handling +internal state changes in the values for the attribute. + + +[[mutability-attribute-basic]] +===== Attribute immutability - basic + +When applied to a basic attribute, `@Immutable` implies immutability in both the updateable +and internal-state sense. E.g. + +.Immutable basic attribute ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CollectionImmutabilityTest.java[tags=collection-immutability-example] +include::{mutability-example-dir}/attribute/BasicAttributeMutabilityTests.java[tags=attribute-immutable-example] ---- ==== -This time, not only the `Event` entity is immutable, but the `Event` collection stored by the `Batch` parent entity. -Once the immutable collection is created, it can never be modified. +Changes to the `theDate` attribute are ignored. + +.Immutable basic attribute change +==== +[source, JAVA, indent=0] +---- +include::{mutability-example-dir}/attribute/BasicAttributeMutabilityTests.java[tags=attribute-immutable-managed-example] +---- +==== + + +[[mutability-attribute-embeddable]] +===== Attribute immutability - embeddable + +To be continued.. + +// todo : document the effect of `@Immutable` on `@Embeddable`, `@Embedded` and `@EmbeddedId` mappings + + +[[mutability-attribute-plural]] +===== Attribute immutability - plural + +Plural attributes (`@ElementCollection`, `@OneToMany`, `@ManyToMany` and `@ManyToAny`) may also +be annotated with `@Immutable`. + +TIP: While most immutable changes are simply discarded, modifying an immutable collection will cause an exception.
+ .Persisting an immutable collection ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CollectionImmutabilityTest.java[tags=collection-immutability-persist-example] +include::{mutability-example-dir}/attribute/PluralAttributeMutabilityTest.java[tags=collection-immutability-persist-example] ---- ==== @@ -81,7 +149,7 @@ For instance, we can still modify the entity name: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CollectionImmutabilityTest.java[tags=collection-entity-update-example] +include::{mutability-example-dir}/attribute/PluralAttributeMutabilityTest.java[tags=collection-entity-update-example] ---- [source, SQL, indent=0] @@ -96,7 +164,7 @@ However, when trying to modify the `events` collection: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CollectionImmutabilityTest.java[tags=collection-immutability-update-example] +include::{mutability-example-dir}/attribute/PluralAttributeMutabilityTest.java[tags=collection-immutability-update-example] ---- [source, bash, indent=0] @@ -105,7 +173,96 @@ include::{extrasdir}/collection-immutability-update-example.log.txt[] ---- ==== -[TIP] + +[[mutability-attribute-entity]] +===== Attribute immutability - entity + +To be continued.. + +// todo : document the effect of `@Immutable` on `@OneToOne`, `@ManyToOne` and `@Any` mappings + + +[[mutability-converter]] +==== AttributeConverter mutability + +Declaring `@Mutability` on an `AttributeConverter` applies the specified `MutabilityPlan` to +all value mappings (attribute, collection element, etc.) to which the converter is applied. + +Declaring `@Immutable` on an `AttributeConverter` is shorthand for declaring `@Mutability` with an +immutable `MutabilityPlan`. + + +[[mutability-usertype]] +==== UserType mutability + +Similar to <> both `@Mutability` and `@Immutable` may +be declared on a `UserType`. + +`@Mutability` applies the specified `MutabilityPlan` to all value mappings (attribute, collection element, etc.) 
+to which the `UserType` is applied. + + +`@Immutable` applies an immutable `MutabilityPlan` to all value mappings (attribute, collection element, etc.) +to which the `UserType` is applied. + + +[[mutability-mutability]] +==== @Mutability + +`MutabilityPlan` is the contract used by Hibernate to abstract mutability concerns, in the sense of internal state changes. + +A Java type has an inherent `MutabilityPlan` based on its `JavaType#getMutabilityPlan`. + +The `@Mutability` annotation allows a specific `MutabilityPlan` to be used and is allowed on an +attribute, `AttributeConverter` and `UserType`. When used on a `AttributeConverter` or `UserType`, +the specified `MutabilityPlan` is effective for all basic values to which the `AttributeConverter` or +`UserType` is applied. + +To understand the impact of internal-state mutability, consider the following entity: + +.Basic mutability model +==== +[source, JAVA, indent=0] +---- +include::{mutability-example-dir}/MutabilityBaselineEntity.java[tags=mutability-base-entity-example] +---- +==== + +When dealing with an inherently immutable value, such as a `String`, there is only one way to +update the value: + +.Changing immutable value ==== -While immutable entity changes are simply discarded, modifying an immutable collection will end up in a `HibernateException` being thrown. -==== \ No newline at end of file +[source, JAVA, indent=0] +---- +include::{mutability-example-dir}/MutabilityBaselineEntity.java[tags=mutability-base-string-example] +---- +==== + +During flush, this change will make the entity "dirty" and the changes will be written (UPDATE) to +the database. + +When dealing with mutable values, however, Hibernate must be aware of both ways to change the value. 
First, like +with the immutable value, we can set the new value: + +.Changing mutable value - setting +==== +[source, JAVA, indent=0] +---- +include::{mutability-example-dir}/MutabilityBaselineEntity.java[tags=mutability-base-date-set-example] +---- +==== + +We can also mutate the existing value: + +.Changing mutable value - mutating +==== +[source, JAVA, indent=0] +---- +include::{mutability-example-dir}/MutabilityBaselineEntity.java[tags=mutability-base-date-mutate-example] +---- +==== + +This mutating example has the same effect as the setting example - they each will make the entity dirty. + + diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/inheritance.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/inheritance.adoc index 82fa78d617e7..08b4d3695658 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/inheritance.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/inheritance.adoc @@ -1,6 +1,8 @@ [[entity-inheritance]] === Inheritance -:sourcedir: ../../../../../test/java/org/hibernate/userguide/inheritance +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-inheritance: {core-project-dir}/src/test/java/org/hibernate/orm/test/inheritance :extrasdir: extras/inheritance Although relational database systems don't provide support for inheritance, Hibernate provides several strategies to leverage this object-oriented trait onto domain model entities: @@ -24,7 +26,7 @@ When using `MappedSuperclass`, the inheritance is visible in the domain model on ==== [source,java] ---- -include::{sourcedir}/MappedSuperclassTest.java[tags=entity-inheritance-mapped-superclass-example,indent=0] +include::{example-dir-inheritance}/MappedSuperclassTest.java[tags=entity-inheritance-mapped-superclass-example,indent=0] ---- [source,sql] @@ -56,7 +58,7 @@ When omitting an explicit inheritance strategy (e.g. 
`@Inheritance`), Jakarta Pe ==== [source,java] ---- -include::{sourcedir}/SingleTableTest.java[tags=entity-inheritance-single-table-example,indent=0] +include::{example-dir-inheritance}/SingleTableTest.java[tags=entity-inheritance-single-table-example,indent=0] ---- [source,sql] @@ -73,7 +75,7 @@ If this is not specified, the `DTYPE` column is used as a discriminator, storing ==== [source,java] ---- -include::{sourcedir}/SingleTableTest.java[tags=entity-inheritance-single-table-persist-example,indent=0] +include::{example-dir-inheritance}/SingleTableTest.java[tags=entity-inheritance-single-table-persist-example,indent=0] ---- [source,sql] @@ -89,7 +91,7 @@ When using polymorphic queries, only a single table is required to be scanned to ==== [source,java] ---- -include::{sourcedir}/SingleTableTest.java[tags=entity-inheritance-single-table-query-example,indent=0] +include::{example-dir-inheritance}/SingleTableTest.java[tags=entity-inheritance-single-table-query-example,indent=0] ---- [source,sql] @@ -146,7 +148,7 @@ we can take advantage of the Hibernate specific `@DiscriminatorFormula` annotati ==== [source,java] ---- -include::{sourcedir}/SingleTableDiscriminatorFormulaTest.java[tags=entity-inheritance-single-table-discriminator-formula-example,indent=0] +include::{example-dir-inheritance}/SingleTableDiscriminatorFormulaTest.java[tags=entity-inheritance-single-table-discriminator-formula-example,indent=0] ---- [source,sql] @@ -173,7 +175,7 @@ To understand how these two values work, consider the following entity mapping: ==== [source,java] ---- -include::{sourcedir}/DiscriminatorNotNullSingleTableTest.java[tags=entity-inheritance-single-table-discriminator-value-example,indent=0] +include::{example-dir-inheritance}/DiscriminatorNotNullSingleTableTest.java[tags=entity-inheritance-single-table-discriminator-value-example,indent=0] ---- ==== @@ -188,7 +190,7 @@ To visualize how it works, consider the following example: ==== [source,java] ---- 
-include::{sourcedir}/DiscriminatorNotNullSingleTableTest.java[tags=entity-inheritance-single-table-discriminator-value-persist-example,indent=0] +include::{example-dir-inheritance}/DiscriminatorNotNullSingleTableTest.java[tags=entity-inheritance-single-table-discriminator-value-persist-example,indent=0] ---- [source,sql] @@ -215,7 +217,7 @@ Each subclass must, however, declare a table column holding the object identifie ==== [source,java] ---- -include::{sourcedir}/JoinTableTest.java[tags=entity-inheritance-joined-table-example,indent=0] +include::{example-dir-inheritance}/JoinTableTest.java[tags=entity-inheritance-joined-table-example,indent=0] ---- [source,sql] @@ -237,7 +239,7 @@ Also, if `@PrimaryKeyJoinColumn` is not set, the primary key / foreign key colum ==== [source,java] ---- -include::{sourcedir}/JoinTablePrimaryKeyJoinColumnTest.java[tags=entity-inheritance-joined-table-primary-key-join-column-example,indent=0] +include::{example-dir-inheritance}/JoinTablePrimaryKeyJoinColumnTest.java[tags=entity-inheritance-joined-table-primary-key-join-column-example,indent=0] ---- [source,sql] @@ -253,7 +255,7 @@ When using polymorphic queries, the base class table must be joined with all sub ==== [source,java] ---- -include::{sourcedir}/JoinTableTest.java[tags=entity-inheritance-joined-table-query-example,indent=0] +include::{example-dir-inheritance}/JoinTableTest.java[tags=entity-inheritance-joined-table-query-example,indent=0] ---- [source,sql] @@ -283,7 +285,7 @@ However, if you wish to use polymorphic associations (e.g. 
an association to the ==== [source,java] ---- -include::{sourcedir}/TablePerClassTest.java[tags=entity-inheritance-table-per-class-example,indent=0] +include::{example-dir-inheritance}/TablePerClassTest.java[tags=entity-inheritance-table-per-class-example,indent=0] ---- [source,sql] @@ -298,7 +300,7 @@ When using polymorphic queries, a UNION is required to fetch the base class tabl ==== [source,java] ---- -include::{sourcedir}/TablePerClassTest.java[tags=entity-inheritance-table-per-class-query-example,indent=0] +include::{example-dir-inheritance}/TablePerClassTest.java[tags=entity-inheritance-table-per-class-query-example,indent=0] ---- [source,sql] @@ -310,23 +312,6 @@ include::{extrasdir}/entity-inheritance-table-per-class-query-example.sql[] [IMPORTANT] ==== Polymorphic queries require multiple UNION queries, so be aware of the performance implications of a large class hierarchy. - -Unfortunately, not all database systems support UNION ALL, in which case, UNION is going to be used instead of UNION ALL. 
- -The following Hibernate dialects support UNION ALL: - -- `AbstractHANADialect` -- `AbstractTransactSQLDialect` -- `CUBRIDDialect` -- `DB2Dialect` -- `H2Dialect` -- `HSQLDialect` -- `Ingres9Dialect` -- `MySQL5Dialect` -- `Oracle8iDialect` -- `Oracle9Dialect` -- `PostgreSQL81Dialect` -- `RDMSOS2200Dialect` ==== [[entity-inheritance-polymorphism]] @@ -345,7 +330,7 @@ For instance, considering the following `DomainModelEntity` interface: ==== [source,java] ---- -include::{sourcedir}/polymorphism/DomainModelEntity.java[tags=entity-inheritance-polymorphism-interface-example,indent=0] +include::{example-dir-inheritance}/polymorphism/DomainModelEntity.java[tags=entity-inheritance-polymorphism-interface-example,indent=0] ---- ==== @@ -359,7 +344,7 @@ and taking the `PolymorphismType.EXPLICIT` setting: ==== [source,java] ---- -include::{sourcedir}/polymorphism/ExplicitPolymorphismTest.java[tags=entity-inheritance-polymorphism-mapping-example,indent=0] +include::{example-dir-inheritance}/polymorphism/ExplicitPolymorphismTest.java[tags=entity-inheritance-polymorphism-mapping-example,indent=0] ---- ==== @@ -370,7 +355,7 @@ If we have the following entity objects in our system: ==== [source,java] ---- -include::{sourcedir}/polymorphism/ExplicitPolymorphismTest.java[tags=entity-inheritance-polymorphism-persist-example,indent=0] +include::{example-dir-inheritance}/polymorphism/ExplicitPolymorphismTest.java[tags=entity-inheritance-polymorphism-persist-example,indent=0] ---- ==== @@ -384,10 +369,10 @@ or they are not annotated at all with the `@Polymorphism` annotation (implying t ==== [source,java] ---- -include::{sourcedir}/polymorphism/ExplicitPolymorphismTest.java[tags=entity-inheritance-polymorphism-fetch-example,indent=0] +include::{example-dir-inheritance}/polymorphism/ExplicitPolymorphismTest.java[tags=entity-inheritance-polymorphism-fetch-example,indent=0] ---- ==== Therefore, only the `Book` was fetched since the `Blog` entity was marked with the `@Polymorphism(type = 
PolymorphismType.EXPLICIT)` annotation, which instructs Hibernate -to skip it when executing a polymorphic query against a non-mapped base class. \ No newline at end of file +to skip it when executing a polymorphic query against a non-mapped base class. diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/naming.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/naming.adoc index 8e337e18e94f..ec90a7851160 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/naming.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/naming.adoc @@ -1,6 +1,8 @@ [[naming]] === Naming strategies -:sourcedir: ../../../../../test/java/org/hibernate/userguide/naming +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-naming: {core-project-dir}/src/test/java/org/hibernate/orm/test/naming Part of the mapping of an object model to the relational database is mapping names from the object model to the corresponding database names. @@ -113,7 +115,7 @@ whose naming standards are to: ==== [source,java] ---- -include::{sourcedir}/AcmeCorpPhysicalNamingStrategy.java[] +include::{example-dir-naming}/AcmeCorpPhysicalNamingStrategy.java[] ---- ==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/natural_id.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/natural_id.adoc index 3d742d6369d3..035cf1e07dd0 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/natural_id.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/natural_id.adoc @@ -1,6 +1,10 @@ [[naturalid]] === Natural Ids -:sourcedir: ../../../../../test/java/org/hibernate/userguide/mapping/identifier +:root-project-dir: ../../../../../../.. 
+:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-naturalid: {core-project-dir}/src/test/java/org/hibernate/orm/test/mapping/identifier +:jcache-project-dir: {root-project-dir}/hibernate-jcache +:example-dir-caching: {jcache-project-dir}/src/test/java/org/hibernate/orm/test/caching :extrasdir: extras Natural ids represent domain model unique identifiers that have a meaning in the real world too. @@ -17,7 +21,7 @@ Natural ids are defined in terms of one or more persistent attributes. ==== [source,java] ---- -include::{sourcedir}/SimpleNaturalIdTest.java[tags=naturalid-simple-basic-attribute-mapping-example,indent=0] +include::{example-dir-naturalid}/SimpleNaturalIdTest.java[tags=naturalid-simple-basic-attribute-mapping-example,indent=0] ---- ==== @@ -26,7 +30,7 @@ include::{sourcedir}/SimpleNaturalIdTest.java[tags=naturalid-simple-basic-attrib ==== [source,java] ---- -include::{sourcedir}/CompositeNaturalIdTest.java[tags=naturalid-single-embedded-attribute-mapping-example,indent=0] +include::{example-dir-naturalid}/CompositeNaturalIdTest.java[tags=naturalid-single-embedded-attribute-mapping-example,indent=0] ---- ==== @@ -35,7 +39,7 @@ include::{sourcedir}/CompositeNaturalIdTest.java[tags=naturalid-single-embedded- ==== [source,java] ---- -include::{sourcedir}/MultipleNaturalIdTest.java[tags=naturalid-multiple-attribute-mapping-example,indent=0] +include::{example-dir-naturalid}/MultipleNaturalIdTest.java[tags=naturalid-multiple-attribute-mapping-example,indent=0] ---- ==== @@ -55,17 +59,17 @@ If the entity does not define a natural id, trying to load an entity by its natu ==== [source,java] ---- -include::{sourcedir}/SimpleNaturalIdTest.java[tags=naturalid-load-access-example,indent=0] +include::{example-dir-naturalid}/SimpleNaturalIdTest.java[tags=naturalid-load-access-example,indent=0] ---- [source,java] ---- -include::{sourcedir}/CompositeNaturalIdTest.java[tags=naturalid-load-access-example,indent=0] 
+include::{example-dir-naturalid}/CompositeNaturalIdTest.java[tags=naturalid-load-access-example,indent=0] ---- [source,java] ---- -include::{sourcedir}/MultipleNaturalIdTest.java[tags=naturalid-load-access-example,indent=0] +include::{example-dir-naturalid}/MultipleNaturalIdTest.java[tags=naturalid-load-access-example,indent=0] ---- ==== @@ -88,12 +92,12 @@ Because the `Book` entities in the first two examples define "simple" natural id ==== [source,java] ---- -include::{sourcedir}/SimpleNaturalIdTest.java[tags=naturalid-simple-load-access-example,indent=0] +include::{example-dir-naturalid}/SimpleNaturalIdTest.java[tags=naturalid-simple-load-access-example,indent=0] ---- [source,java] ---- -include::{sourcedir}/CompositeNaturalIdTest.java[tags=naturalid-simple-load-access-example,indent=0] +include::{example-dir-naturalid}/CompositeNaturalIdTest.java[tags=naturalid-simple-load-access-example,indent=0] ---- ==== @@ -122,7 +126,7 @@ If the value(s) of the natural id attribute(s) change, `@NaturalId(mutable = tru ==== [source,java] ---- -include::{sourcedir}/MutableNaturalIdTest.java[tags=naturalid-mutable-mapping-example,indent=0] +include::{example-dir-naturalid}/MutableNaturalIdTest.java[tags=naturalid-mutable-mapping-example,indent=0] ---- ==== @@ -144,7 +148,7 @@ This will force Hibernate to circumvent the checking of mutable natural ids. 
==== [source,java] ---- -include::{sourcedir}/MutableNaturalIdTest.java[tags=naturalid-mutable-synchronized-example,indent=0] +include::{example-dir-naturalid}/MutableNaturalIdTest.java[tags=naturalid-mutable-synchronized-example,indent=0] ---- ==== @@ -155,6 +159,6 @@ Not only can this NaturalId-to-PK resolution be cached in the Session, but we ca ==== [source,java] ---- -include::{sourcedir}/CacheableNaturalIdTest.java[tags=naturalid-cacheable-mapping-example,indent=0] +include::{example-dir-caching}/CacheableNaturalIdTest.java[tags=naturalid-cacheable-mapping-example,indent=0] ---- -==== \ No newline at end of file +==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/partitioning.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/partitioning.adoc new file mode 100644 index 000000000000..97e90014e4d1 --- /dev/null +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/partitioning.adoc @@ -0,0 +1,41 @@ +[[partitioning]] +=== Partitioning +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-partition-key: {core-project-dir}/src/test/java/org/hibernate/orm/test/mapping/identifier +:extrasdir: extras + +In data management, it is sometimes necessary to split data of a table into various (physical) partitions, +based on partition keys and a partitioning scheme. + +Due to the nature of partitioning, it is vital for the database to know the partition key of a row for certain operations, +like SQL update and delete statements. If a database doesn't know the partition of a row that should be updated or deleted, +then it must look for the row in all partitions, leading to poor performance. + +The `@PartitionKey` annotation is a way to tell Hibernate about the column, such that it can include a column restriction as +predicate into SQL update and delete statements for entity state changes. 
+ +[[partition-key-mapping]] +==== Partition Key Mapping + +Partition keys are defined in terms of one or more persistent attributes. + +[[partition-key-simple-basic-attribute-mapping-example]] +.Partition key using single basic attribute +==== +[source,java] +---- +include::{example-dir-partition-key}/SimplePartitionKeyTest.java[tags=partition-key-simple-basic-attribute-mapping-example,indent=0] +---- +==== + +When updating or deleting an entity, Hibernate will include a partition key constraint similar to this + +[[partition-key-simple-basic-attribute-sql-example]] +==== +[source,sql] +---- +update user_tbl set firstname=?,lastname=?,tenantKey=? where id=? and tenantKey=? +delete from user_tbl where id=? and tenantKey=? +---- +==== \ No newline at end of file diff --git a/documentation/src/main/asciidoc/userguide/chapters/domain/types.adoc b/documentation/src/main/asciidoc/userguide/chapters/domain/types.adoc index 6950a90efba5..051ad974d441 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/domain/types.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/domain/types.adoc @@ -1,6 +1,8 @@ [[mapping-types]] === Mapping types -:sourcedir: ../../../../../test/java/org/hibernate/userguide/mapping +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-mapping: {core-project-dir}/src/test/java/org/hibernate/orm/test/mapping :extrasdir: extras/types Hibernate understands both the Java and JDBC representations of application data. 
@@ -29,7 +31,7 @@ include::{extrasdir}/mapping-types-basic-example.sql[] [source, JAVA, indent=0] ---- -include::{sourcedir}/basic/TypeCategoryTest.java[tags=mapping-types-basic-example] +include::{example-dir-mapping}/basic/TypeCategoryTest.java[tags=mapping-types-basic-example] ---- ==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/envers/Envers.adoc b/documentation/src/main/asciidoc/userguide/chapters/envers/Envers.adoc index 095e9a289c2c..bada117a5970 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/envers/Envers.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/envers/Envers.adoc @@ -1,6 +1,8 @@ [[envers]] == Envers -:sourcedir: ../../../../../test/java/org/hibernate/userguide/envers +:root-project-dir: ../../../../../../.. +:envers-project-dir: {root-project-dir}/hibernate-envers +:example-dir-envers: {envers-project-dir}/src/test/java/org/hibernate/orm/test/envers :extrasdir: extras [[envers-basics]] @@ -44,7 +46,7 @@ Hibernate is going to generate the following tables using the `hibernate.hbm2ddl ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-mapping-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-mapping-example] ---- [source, SQL, indent=0] @@ -64,7 +66,7 @@ let's see how Envers auditing works when inserting, updating, and deleting the e ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-insert-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-insert-example] ---- [source, SQL, indent=0] @@ -78,7 +80,7 @@ include::{extrasdir}/envers-audited-insert-example.sql[] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-update-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-update-example] ---- [source, SQL, indent=0] @@ -92,7 +94,7 @@ 
include::{extrasdir}/envers-audited-update-example.sql[] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-delete-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-delete-example] ---- [source, SQL, indent=0] @@ -120,7 +122,7 @@ The audit (history) of an entity can be accessed using the `AuditReader` interfa ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-revisions-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-revisions-example] ---- [source, SQL, indent=0] @@ -136,7 +138,7 @@ Using the previously fetched revisions, we can now inspect the state of the `Cus ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-rev1-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-rev1-example] ---- [source, SQL, indent=0] @@ -159,7 +161,7 @@ The same goes for the second revision associated with the `UPDATE` statement. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-rev2-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-rev2-example] ---- ==== @@ -170,7 +172,7 @@ For the deleted entity revision, Envers throws a `NoResultException` since the e ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-rev3-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-rev3-example] ---- ==== @@ -184,7 +186,7 @@ all attributes, except for the entity identifier, are going to be `null`. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultAuditTest.java[tags=envers-audited-rev4-example] +include::{example-dir-envers}/DefaultAuditTest.java[tags=envers-audited-rev4-example] ---- ==== @@ -412,7 +414,7 @@ First, you need to configure the `ValidityAuditStrategy`: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ValidityStrategyAuditTest.java[tags=envers-audited-validity-configuration-example] +include::{example-dir-envers}/ValidityStrategyAuditTest.java[tags=envers-audited-validity-configuration-example] ---- ==== @@ -513,7 +515,7 @@ Considering we have a `CurrentUser` utility which stores the currently logged us ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CustomRevisionEntityTest.java[tags=envers-revisionlog-CurrentUser-example] +include::{example-dir-envers}/CustomRevisionEntityTest.java[tags=envers-revisionlog-CurrentUser-example] ---- ==== @@ -524,7 +526,7 @@ Now, we need to provide a custom `@RevisionEntity` to store the currently logged ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CustomRevisionEntityTest.java[tags=envers-revisionlog-RevisionEntity-example] +include::{example-dir-envers}/CustomRevisionEntityTest.java[tags=envers-revisionlog-RevisionEntity-example] ---- ==== @@ -537,7 +539,7 @@ of `RevisionEntity` instances. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CustomRevisionEntityTest.java[tags=envers-revisionlog-RevisionListener-example] +include::{example-dir-envers}/CustomRevisionEntityTest.java[tags=envers-revisionlog-RevisionListener-example] ---- ==== @@ -561,7 +563,7 @@ Now, when inserting a `Customer` entity, Envers generates the following statemen ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CustomRevisionEntityTest.java[tags=envers-revisionlog-RevisionEntity-persist-example] +include::{example-dir-envers}/CustomRevisionEntityTest.java[tags=envers-revisionlog-RevisionEntity-persist-example] ---- [source, SQL, indent=0] @@ -612,7 +614,7 @@ Tracking of modified entity names can be enabled in three different ways: + [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditDefaultTrackingTest.java[tags=envers-tracking-modified-entities-revchanges-example] +include::{example-dir-envers}/EntityTypeChangeAuditDefaultTrackingTest.java[tags=envers-tracking-modified-entities-revchanges-example] ---- + . Mark an appropriate field of a custom revision entity with `@org.hibernate.envers.ModifiedEntityNames` annotation. 
@@ -620,7 +622,7 @@ include::{sourcedir}/EntityTypeChangeAuditDefaultTrackingTest.java[tags=envers-t + [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-revchanges-example] +include::{example-dir-envers}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-revchanges-example] ---- Considering we have a `Customer` entity illustrated by the following example: @@ -630,7 +632,7 @@ Considering we have a `Customer` entity illustrated by the following example: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-revchanges-before-rename-example] +include::{example-dir-envers}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-revchanges-before-rename-example] ---- ==== @@ -642,7 +644,7 @@ Envers is going to insert a new record in the `REVCHANGES` table with the previo ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-revchanges-after-rename-example] +include::{example-dir-envers}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-revchanges-after-rename-example] ---- [source, SQL, indent=0] @@ -666,7 +668,7 @@ added, modified or removed within current revision boundaries. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-EntityTrackingRevisionListener-example] +include::{example-dir-envers}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-EntityTrackingRevisionListener-example] ---- ==== @@ -677,7 +679,7 @@ The `CustomTrackingRevisionListener` adds the fully-qualified class name to the ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-RevisionEntity-example] +include::{example-dir-envers}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-RevisionEntity-example] ---- ==== @@ -688,7 +690,7 @@ The `CustomTrackingRevisionEntity` contains a `@OneToMany` list of `ModifiedType ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-EntityType-example] +include::{example-dir-envers}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-EntityType-example] ---- ==== @@ -699,7 +701,7 @@ Now, when fetching the `CustomTrackingRevisionEntity`, you can get access to the ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-query-example] +include::{example-dir-envers}/EntityTypeChangeAuditTrackingRevisionListenerTest.java[tags=envers-tracking-modified-entities-revchanges-query-example] ---- ==== @@ -732,7 +734,7 @@ Because of costs mentioned, it is recommended to enable the feature selectively, ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ModifiedFlagsAuditTest.java[tags=envers-tracking-properties-changes-mapping-example] 
+include::{example-dir-envers}/ModifiedFlagsAuditTest.java[tags=envers-tracking-properties-changes-mapping-example] ---- [source, SQL, indent=0] @@ -748,7 +750,7 @@ As you can see, every property features a `_MOD` column (e.g. `createdOn_MOD`) i ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ModifiedFlagsAuditTest.java[tags=envers-tracking-properties-changes-example] +include::{example-dir-envers}/ModifiedFlagsAuditTest.java[tags=envers-tracking-properties-changes-example] ---- [source, SQL, indent=0] @@ -863,7 +865,7 @@ The entry point for this type of queries is: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=entities-at-revision-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=entities-at-revision-example] ---- ==== @@ -880,7 +882,7 @@ For example, to select only entities where the `firstName` property is equal to ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=entities-filtering-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=entities-filtering-example] ---- ==== @@ -892,7 +894,7 @@ you can use either the target entity or its identifier. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=entities-filtering-by-entity-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=entities-filtering-by-entity-example] ---- [source, SQL, indent=0] @@ -908,7 +910,7 @@ The same SQL is generated even if we provide the identifier instead of the targe ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=entities-filtering-by-entity-identifier-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=entities-filtering-by-entity-identifier-example] ---- ==== @@ -919,7 +921,7 @@ Apart from strict equality matching, you can also use an `IN` clause to provide ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=entities-in-clause-filtering-by-entity-identifier-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=entities-in-clause-filtering-by-entity-identifier-example] ---- [source, SQL, indent=0] @@ -938,7 +940,7 @@ A full query, can look for example like this: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=entities-filtering-and-pagination] +include::{example-dir-envers}/QueryAuditTest.java[tags=entities-filtering-and-pagination] ---- [source, SQL, indent=0] @@ -955,7 +957,7 @@ The entry point for this type of queries is: [[revisions-of-entity-query-example]] [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=revisions-of-entity-query-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=revisions-of-entity-query-example] ---- You can add constraints to this query in the same way as to the previous one. 
@@ -975,7 +977,7 @@ For example, the following query will select the smallest revision number, at wh [[revisions-of-entity-query-by-revision-number-example]] [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=revisions-of-entity-query-by-revision-number-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=revisions-of-entity-query-by-revision-number-example] ---- The second additional feature you can use in queries for revisions is the ability to _maximize_/_minimize_ a property. @@ -987,7 +989,7 @@ you can run the following query: [[revisions-of-entity-query-minimize-example]] [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=revisions-of-entity-query-minimize-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=revisions-of-entity-query-minimize-example] ---- The `minimize()` and `maximize()` methods return a criterion, to which you can add constraints, @@ -1018,7 +1020,7 @@ maximum revision number, you would use the following query: [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=aggregate-max-revision-with-entity-example] +include::{example-dir-envers}/QueryAuditTest.java[tags=aggregate-max-revision-with-entity-example] ---- In other words, the result set would contain a list of `Customer` instances, one per primary key. Each instance would @@ -1039,7 +1041,7 @@ First, you must make sure that your entity can track _modification flags_: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-entity-example] +include::{example-dir-envers}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-entity-example] ---- ==== @@ -1051,7 +1053,7 @@ for which the `lastName` property has changed. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-hasChanged-example] +include::{example-dir-envers}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-hasChanged-example] ---- [source, SQL, indent=0] @@ -1071,7 +1073,7 @@ Of course, nothing prevents users from combining `hasChanged` condition with som ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-hasChanged-and-hasNotChanged-example] +include::{example-dir-envers}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-hasChanged-and-hasNotChanged-example] ---- [source, SQL, indent=0] @@ -1088,7 +1090,7 @@ we have to use the `forEntitiesModifiedAtRevision` query: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-at-revision-example] +include::{example-dir-envers}/QueryAuditWithModifiedFlagTest.java[tags=envers-tracking-properties-changes-queries-at-revision-example] ---- [source, SQL, indent=0] @@ -1146,12 +1148,12 @@ This basic query allows retrieving entity names and corresponding Java classes c ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-queries-example1] +include::{example-dir-envers}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-queries-example1] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-queries-example2] +include::{example-dir-envers}/EntityTypeChangeAuditTest.java[tags=envers-tracking-modified-entities-queries-example2] ---- ==== @@ -1192,7 +1194,7 @@ The basis for creating an entity relation join query is as follows: ==== [source, JAVA, indent=0] ---- 
-include::{sourcedir}/QueryAuditTest.java[tags=envers-querying-entity-relation-inner-join] +include::{example-dir-envers}/QueryAuditTest.java[tags=envers-querying-entity-relation-inner-join] ---- ==== @@ -1201,7 +1203,7 @@ include::{sourcedir}/QueryAuditTest.java[tags=envers-querying-entity-relation-in ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=envers-querying-entity-relation-left-join] +include::{example-dir-envers}/QueryAuditTest.java[tags=envers-querying-entity-relation-left-join] ---- ==== @@ -1215,7 +1217,7 @@ you can use the following query: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditTest.java[tags=envers-querying-entity-relation-join-restriction] +include::{example-dir-envers}/QueryAuditTest.java[tags=envers-querying-entity-relation-join-restriction] ---- [source, SQL, indent=0] @@ -1234,7 +1236,7 @@ with the country attribute of the address property being `România`: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-restriction] +include::{example-dir-envers}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-restriction] ---- [source, SQL, indent=0] @@ -1253,7 +1255,7 @@ having the `address` in `Cluj-Napoca` or the `address` does _not_ have any count ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-join-multiple-restrictions] +include::{example-dir-envers}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-join-multiple-restrictions] ---- [source, SQL, indent=0] @@ -1277,7 +1279,7 @@ where the country name is `România` or that the `Customer` lives in `Cluj-Napoc ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-multiple-restrictions] 
+include::{example-dir-envers}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-multiple-restrictions] ---- [source, SQL, indent=0] @@ -1295,7 +1297,7 @@ Assuming the `Customer` and the `Address` were previously changed as follows: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-multiple-restrictions-combined-entities] +include::{example-dir-envers}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-multiple-restrictions-combined-entities] ---- ==== @@ -1307,7 +1309,7 @@ where the `city` property of the `address` attribute equals the `name` of the as ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-multiple-restrictions-combined] +include::{example-dir-envers}/QueryAuditAdressCountryTest.java[tags=envers-querying-entity-relation-nested-join-multiple-restrictions-combined] ---- [source, SQL, indent=0] @@ -1418,7 +1420,7 @@ Your opinions on the subject are very welcome on the forum. === Generating Envers schema with Hibernate hbm2ddl tool If you would like to generate the database schema file with Hibernate, -you simply need to use the hbm2ddl too. +you simply need to use the hbm2ddl tool. This task will generate the definitions of all entities, both of those which are audited by Envers and those which are not. 
@@ -1431,7 +1433,7 @@ For the following entities, Hibernate is going to generate the following databas ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/QueryAuditAdressCountryTest.java[tags=envers-generateschema-example] +include::{example-dir-envers}/QueryAuditAdressCountryTest.java[tags=envers-generateschema-example] ---- [source, SQL, indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/events/Events.adoc b/documentation/src/main/asciidoc/userguide/chapters/events/Events.adoc index 71e18d030867..e8b9c8e638c8 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/events/Events.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/events/Events.adoc @@ -1,6 +1,8 @@ [[events]] -== Interceptors and events -:sourcedir: ../../../../../test/java/org/hibernate/userguide/events +== Interceptors and Events +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-event: {core-project-dir}/src/test/java/org/hibernate/orm/test/events :extrasdir: extras It is useful for the application to react to certain events that occur inside Hibernate. @@ -19,7 +21,7 @@ The following example shows an `Interceptor` implementation that automatically l ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/InterceptorTest.java[tags=events-interceptors-example] +include::{example-dir-event}/InterceptorTest.java[tags=events-interceptors-example] ---- ==== @@ -36,7 +38,7 @@ A Session-scoped interceptor is specified when a session is opened. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/InterceptorTest.java[tags=events-interceptors-session-scope-example] +include::{example-dir-event}/InterceptorTest.java[tags=events-interceptors-session-scope-example] ---- ==== @@ -49,7 +51,7 @@ Ensure that you do not store session-specific states since multiple sessions wil ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/InterceptorTest.java[tags=events-interceptors-session-factory-scope-example] +include::{example-dir-event}/InterceptorTest.java[tags=events-interceptors-session-factory-scope-example] ---- ==== @@ -81,12 +83,12 @@ Here is an example of a custom load event listener: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ListenerTest.java[tags=events-interceptors-load-listener-example-part1] +include::{example-dir-event}/ListenerTest.java[tags=events-interceptors-load-listener-example-part1] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/ListenerTest.java[tags=events-interceptors-load-listener-example-part2] +include::{example-dir-event}/ListenerTest.java[tags=events-interceptors-load-listener-example-part2] ---- ==== @@ -134,7 +136,7 @@ The entity listener class is then associated with the entity using the `jakarta. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ListenerTest.java[tags=events-jpa-callbacks-example] +include::{example-dir-event}/ListenerTest.java[tags=events-jpa-callbacks-example] ---- ==== @@ -172,12 +174,12 @@ Default entity listeners can only be defined in XML mapping files. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultEntityListener.java[tags=events-default-listener-mapping-example] +include::{example-dir-event}/DefaultEntityListener.java[tags=events-default-listener-mapping-example] ---- [source, XML, indent=0] ---- -include::{sourcedir}/DefaultEntityListener-orm.xml[tags=events-default-listener-mapping-example] +include::{example-dir-event}/DefaultEntityListener-orm.xml[tags=events-default-listener-mapping-example] ---- ==== @@ -185,12 +187,12 @@ Considering that all entities extend the `BaseEntity` class: [source, JAVA, indent=0] ---- -include::{sourcedir}/BaseEntity.java[tags=events-default-listener-mapping-example] +include::{example-dir-event}/BaseEntity.java[tags=events-default-listener-mapping-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultEntityListenerTest.java[tags=events-default-listener-mapping-example] +include::{example-dir-event}/DefaultEntityListenerTest.java[tags=events-default-listener-mapping-example] ---- When persisting a `Person` or `Book` entity, the `createdOn` is going to be set by the `onPersist` method of the `DefaultEntityListener`. 
@@ -200,7 +202,7 @@ When persisting a `Person` or `Book` entity, the `createdOn` is going to be set ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultEntityListenerTest.java[tags=events-default-listener-persist-example] +include::{example-dir-event}/DefaultEntityListenerTest.java[tags=events-default-listener-persist-example] ---- [source, SQL, indent=0] @@ -216,7 +218,7 @@ When updating a `Person` or `Book` entity, the `updatedOn` is going to be set by ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultEntityListenerTest.java[tags=events-default-listener-update-example] +include::{example-dir-event}/DefaultEntityListenerTest.java[tags=events-default-listener-update-example] ---- [source, SQL, indent=0] @@ -241,7 +243,7 @@ while `@ExcludeSuperclassListeners` is used to ignore the default entity listene ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultEntityListenerTest.java[tags=events-exclude-default-listener-mapping-example] +include::{example-dir-event}/DefaultEntityListenerTest.java[tags=events-exclude-default-listener-mapping-example] ---- ==== @@ -254,7 +256,7 @@ because the `Publisher` entity was marked with the `@ExcludeDefaultListeners` an ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DefaultEntityListenerTest.java[tags=events-exclude-default-listener-persist-example] +include::{example-dir-event}/DefaultEntityListenerTest.java[tags=events-exclude-default-listener-persist-example] ---- [source, SQL, indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/fetching/Fetching.adoc b/documentation/src/main/asciidoc/userguide/chapters/fetching/Fetching.adoc index 281b64b97134..844c56bea4de 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/fetching/Fetching.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/fetching/Fetching.adoc @@ -1,6 +1,8 @@ [[fetching]] == Fetching -:sourcedir: ../../../../../test/java/org/hibernate/userguide/fetching +:root-project-dir: 
../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-fetching: {core-project-dir}/src/test/java/org/hibernate/orm/test/fetching :extrasdir: extras Fetching, essentially, is the process of grabbing data from the database and making it available to the application. @@ -61,7 +63,7 @@ To see the difference between direct fetching and entity queries in regard to ea ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DirectVsQueryFetchingTest.java[tags=fetching-direct-vs-query-domain-model-example] +include::{example-dir-fetching}/DirectVsQueryFetchingTest.java[tags=fetching-direct-vs-query-domain-model-example] ---- ==== @@ -74,7 +76,7 @@ When issuing a direct entity fetch, Hibernate executed the following SQL query: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DirectVsQueryFetchingTest.java[tags=fetching-direct-vs-query-direct-fetching-example] +include::{example-dir-fetching}/DirectVsQueryFetchingTest.java[tags=fetching-direct-vs-query-direct-fetching-example] ---- [source, SQL, indent=0] @@ -92,7 +94,7 @@ On the other hand, if you are using an entity query that does not contain a `JOI ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DirectVsQueryFetchingTest.java[tags=fetching-direct-vs-query-entity-query-example] +include::{example-dir-fetching}/DirectVsQueryFetchingTest.java[tags=fetching-direct-vs-query-entity-query-example] ---- [source, SQL, indent=0] @@ -122,7 +124,7 @@ Let's consider these topics as it relates to a sample domain model and a few use ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FetchingTest.java[tags=fetching-strategies-domain-model-example] +include::{example-dir-fetching}/FetchingTest.java[tags=fetching-strategies-domain-model-example] ---- ==== @@ -146,7 +148,7 @@ Let's assume that login only requires access to the `Employee` information, not ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FetchingTest.java[tags=fetching-strategies-no-fetching-example] 
+include::{example-dir-fetching}/FetchingTest.java[tags=fetching-strategies-no-fetching-example] ---- ==== @@ -160,7 +162,7 @@ If the login process does not need access to the `Employee` information specific ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FetchingTest.java[tags=fetching-strategies-no-fetching-scalar-example] +include::{example-dir-fetching}/FetchingTest.java[tags=fetching-strategies-no-fetching-scalar-example] ---- ==== @@ -175,7 +177,7 @@ Certainly access to the `Employee` is needed, as is the collection of `Projects` ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FetchingTest.java[tags=fetching-strategies-dynamic-fetching-jpql-example] +include::{example-dir-fetching}/FetchingTest.java[tags=fetching-strategies-dynamic-fetching-jpql-example] ---- ==== @@ -184,7 +186,7 @@ include::{sourcedir}/FetchingTest.java[tags=fetching-strategies-dynamic-fetching ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FetchingTest.java[tags=fetching-strategies-dynamic-fetching-criteria-example] +include::{example-dir-fetching}/FetchingTest.java[tags=fetching-strategies-dynamic-fetching-criteria-example] ---- ==== @@ -208,12 +210,12 @@ Below is a `fetch graph` dynamic fetching example: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-mapping-example] +include::{example-dir-fetching}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-mapping-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-example] +include::{example-dir-fetching}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-example] ---- ==== @@ -242,7 +244,7 @@ and we'd like to fetch the `department` for the `Employee` child association. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-subgraph-mapping-example] +include::{example-dir-fetching}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-subgraph-mapping-example] ---- ==== @@ -253,7 +255,7 @@ When fetching this entity graph, Hibernate generates the following SQL query: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-subgraph-example] +include::{example-dir-fetching}/GraphFetchingTest.java[tags=fetching-strategies-dynamic-fetching-entity-subgraph-example] ---- [source, SQL, indent=0] @@ -302,7 +304,7 @@ the Jakarta Persistence specification proper. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-example-1] +include::{example-dir-fetching}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-example-1] ---- ==== @@ -317,7 +319,7 @@ to the end of the attribute name. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-key-example-1] +include::{example-dir-fetching}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-key-example-1] ---- ==== @@ -325,7 +327,7 @@ include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetc ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-key-example-2] +include::{example-dir-fetching}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-parsing-key-example-2] ---- ==== @@ -376,7 +378,7 @@ the previous example can also be built by combining separate aspect graphs into ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-merging-example] +include::{example-dir-fetching}/GraphParsingTest.java[tags=fetching-strategies-dynamic-fetching-entity-graph-merging-example] ---- ==== @@ -393,12 +395,12 @@ So we would leverage a fetch profile. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ProfileFetchingTest.java[tags=fetching-strategies-dynamic-fetching-profile-mapping-example] +include::{example-dir-fetching}/ProfileFetchingTest.java[tags=fetching-strategies-dynamic-fetching-profile-mapping-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/ProfileFetchingTest.java[tags=fetching-strategies-dynamic-fetching-profile-example] +include::{example-dir-fetching}/ProfileFetchingTest.java[tags=fetching-strategies-dynamic-fetching-profile-example] ---- ==== @@ -419,7 +421,7 @@ Considering the following entity mapping: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchFetchingTest.java[tags=fetching-batch-mapping-example] +include::{example-dir-fetching}/BatchFetchingTest.java[tags=fetching-batch-mapping-example] ---- ==== @@ -432,7 +434,7 @@ the `@BatchSize` annotations allows us to load multiple `Employee` entities in a ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BatchFetchingTest.java[tags=fetching-batch-fetching-example] +include::{example-dir-fetching}/BatchFetchingTest.java[tags=fetching-batch-fetching-example] ---- [source, SQL, indent=0] @@ -453,6 +455,13 @@ most of the time, a DTO projection or a `JOIN FETCH` is a much better alternativ it allows you to fetch all the required data with a single query. ==== +[NOTE] +==== +When `LockModeType` is different from `NONE`, Hibernate will not execute batch fetching, so uninitialized entity proxies will not be initialized. + +This is because the lock mode is different from that of the proxies in the batch fetch queue. 
+==== + [[fetching-fetch-annotation]] === The `@Fetch` annotation mapping @@ -480,7 +489,7 @@ To demonstrate how `FetchMode.SELECT` works, consider the following entity mappi ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FetchModeSelectTest.java[tags=fetching-strategies-fetch-mode-select-mapping-example] +include::{example-dir-fetching}/FetchModeSelectTest.java[tags=fetching-strategies-fetch-mode-select-mapping-example] ---- ==== @@ -493,7 +502,7 @@ collection using a secondary `SELECT` statement upon accessing the child collect ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FetchModeSelectTest.java[tags=fetching-strategies-fetch-mode-select-example] +include::{example-dir-fetching}/FetchModeSelectTest.java[tags=fetching-strategies-fetch-mode-select-example] ---- [source, SQL, indent=0] @@ -516,7 +525,7 @@ To demonstrate how `FetchMode.SUBSELECT` works, we are going to modify the <> +* <> Any other settings prefixed with `hibernate.jndi.` will be collected and passed along to the JNDI provider. diff --git a/documentation/src/main/asciidoc/userguide/chapters/locking/Locking.adoc b/documentation/src/main/asciidoc/userguide/chapters/locking/Locking.adoc index 059cf06c574c..75c8acd82f26 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/locking/Locking.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/locking/Locking.adoc @@ -1,6 +1,8 @@ [[locking]] == Locking -:sourcedir: ../../../../../test/java/org/hibernate/userguide/locking +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-locking: {core-project-dir}/src/test/java/org/hibernate/orm/test/locking :extrasdir: extras In a relational database, locking refers to actions taken to prevent data from changing between the time it is read and the time is used. 
@@ -55,17 +57,17 @@ However, Hibernate allows you to use even Java 8 Date/Time types, such as `Insta ==== [source,java] ---- -include::{sourcedir}/OptimisticLockingTest.java[tags=locking-optimistic-entity-mapping-example,indent=0] +include::{example-dir-locking}/OptimisticLockingTest.java[tags=locking-optimistic-entity-mapping-example,indent=0] ---- [source,java] ---- -include::{sourcedir}/OptimisticLockingTimestampTest.java[tags=locking-optimistic-entity-mapping-example,indent=0] +include::{example-dir-locking}/OptimisticLockingTimestampTest.java[tags=locking-optimistic-entity-mapping-example,indent=0] ---- [source,java] ---- -include::{sourcedir}/OptimisticLockingInstantTest.java[tags=locking-optimistic-entity-mapping-example,indent=0] +include::{example-dir-locking}/OptimisticLockingInstantTest.java[tags=locking-optimistic-entity-mapping-example,indent=0] ---- ==== @@ -79,7 +81,7 @@ The version number mechanism for optimistic locking is provided through a `@Vers ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/OptimisticLockingTest.java[tags=locking-optimistic-version-number-example,indent=0] +include::{example-dir-locking}/OptimisticLockingTest.java[tags=locking-optimistic-version-number-example,indent=0] ---- ==== @@ -108,7 +110,7 @@ Timestamping is automatically used if you the `@Version` annotation on a `Date` ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/OptimisticLockingTest.java[tags=locking-optimistic-version-timestamp-example,indent=0] +include::{example-dir-locking}/OptimisticLockingTest.java[tags=locking-optimistic-version-timestamp-example,indent=0] ---- ==== @@ -119,7 +121,7 @@ The timestamp can also be generated by the database, instead of by the VM, using ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/VersionSourceTest.java[tags=locking-optimistic-version-timestamp-source-mapping-example,indent=0] 
+include::{example-dir-locking}/VersionSourceTest.java[tags=locking-optimistic-version-timestamp-source-mapping-example,indent=0] ---- ==== @@ -130,7 +132,7 @@ Now, when persisting a `Person` entity, Hibernate calls the database-specific cu ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/VersionSourceTest.java[tags=locking-optimistic-version-timestamp-source-persist-example,indent=0] +include::{example-dir-locking}/VersionSourceTest.java[tags=locking-optimistic-version-timestamp-source-persist-example,indent=0] ---- [source, SQL,indent=0] @@ -152,7 +154,7 @@ as illustrated in the following example. ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/OptimisticLockTest.java[tags=locking-optimistic-exclude-attribute-mapping-example,indent=0] +include::{example-dir-locking}/OptimisticLockTest.java[tags=locking-optimistic-exclude-attribute-mapping-example,indent=0] ---- ==== @@ -164,7 +166,7 @@ the two concurrent transactions are not going to conflict as illustrated by the ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/OptimisticLockTest.java[tags=locking-optimistic-exclude-attribute-example,indent=0] +include::{example-dir-locking}/OptimisticLockTest.java[tags=locking-optimistic-exclude-attribute-example,indent=0] ---- [source, SQL,indent=0] @@ -218,7 +220,7 @@ There are 4 available OptimisticLockTypes: ==== [source,java] ---- -include::{sourcedir}/OptimisticLockTypeAllTest.java[tag=locking-optimistic-lock-type-all-example,indent=0] +include::{example-dir-locking}/OptimisticLockTypeAllTest.java[tag=locking-optimistic-lock-type-all-example,indent=0] ---- ==== @@ -229,7 +231,7 @@ When you need to modify the `Person` entity above: ==== [source,java] ---- -include::{sourcedir}/OptimisticLockTypeAllTest.java[tag=locking-optimistic-lock-type-all-update-example,indent=0] +include::{example-dir-locking}/OptimisticLockTypeAllTest.java[tag=locking-optimistic-lock-type-all-update-example,indent=0] ---- [source,SQL] @@ -259,7 +261,7 @@ since the entity was 
loaded in the currently running Persistence Context. ==== [source,java] ---- -include::{sourcedir}/OptimisticLockTypeDirtyTest.java[tag=locking-optimistic-lock-type-dirty-example,indent=0] +include::{example-dir-locking}/OptimisticLockTypeDirtyTest.java[tag=locking-optimistic-lock-type-dirty-example,indent=0] ---- ==== @@ -270,7 +272,7 @@ When you need to modify the `Person` entity above: ==== [source,java] ---- -include::{sourcedir}/OptimisticLockTypeDirtyTest.java[tag=locking-optimistic-lock-type-dirty-update-example,indent=0] +include::{example-dir-locking}/OptimisticLockTypeDirtyTest.java[tag=locking-optimistic-lock-type-dirty-update-example,indent=0] ---- [source,SQL] @@ -354,7 +356,7 @@ The scope can either be `NORMAL` (default value) or `EXTENDED`. The `EXTENDED` s ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/ExplicitLockingTest.java[tags=locking-jpa-query-hints-timeout-example,indent=0] +include::{example-dir-locking}/ExplicitLockingTest.java[tags=locking-jpa-query-hints-timeout-example,indent=0] ---- [source, SQL,indent=0] @@ -384,7 +386,7 @@ The following example shows how to obtain a shared database lock. 
==== [source, JAVA,indent=0] ---- -include::{sourcedir}/ExplicitLockingTest.java[tags=locking-session-lock-example,indent=0] +include::{example-dir-locking}/ExplicitLockingTest.java[tags=locking-session-lock-example,indent=0] ---- [source, SQL,indent=0] @@ -410,7 +412,7 @@ For this reason, Hibernate uses secondary selects to lock the previously fetched ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/ExplicitLockingTest.java[tags=locking-follow-on-example,indent=0] +include::{example-dir-locking}/ExplicitLockingTest.java[tags=locking-follow-on-example,indent=0] ---- [source, SQL,indent=0] @@ -429,7 +431,7 @@ To avoid the N+1 query problem, a separate query can be used to apply the lock u ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/ExplicitLockingTest.java[tags=locking-follow-on-secondary-query-example,indent=0] +include::{example-dir-locking}/ExplicitLockingTest.java[tags=locking-follow-on-secondary-query-example,indent=0] ---- [source, SQL,indent=0] @@ -450,7 +452,7 @@ Even more important is that you can overrule the default follow-on-locking detec ==== [source, JAVA,indent=0] ---- -include::{sourcedir}/ExplicitLockingTest.java[tags=locking-follow-on-explicit-example,indent=0] +include::{example-dir-locking}/ExplicitLockingTest.java[tags=locking-follow-on-explicit-example,indent=0] ---- [source, SQL,indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/multitenancy/MultiTenancy.adoc b/documentation/src/main/asciidoc/userguide/chapters/multitenancy/MultiTenancy.adoc index 057aa1df1e3e..b30f83d1d12e 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/multitenancy/MultiTenancy.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/multitenancy/MultiTenancy.adoc @@ -1,6 +1,8 @@ [[multitenacy]] == Multitenancy -:sourcedir: ../../../../../test/java/org/hibernate/userguide/multitenancy +:root-project-dir: ../../../../../../.. 
+:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-multitenancy: {core-project-dir}/src/test/java/org/hibernate/orm/test/multitenancy :extrasdir: extras [[multitenacy-intro]] @@ -69,7 +71,7 @@ The API is really just defined by passing the tenant identifier as part of openi ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-session-example] +include::{example-dir-multitenancy}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-session-example] ---- ==== @@ -79,7 +81,7 @@ include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-se For the partitioned data approach, each entity representing partitioned data must declare a field annotated `@TenantId`. -[[multitenacy-hibernate-MultiTenantConnectionProvider-example]] +[[multitenacy-hibernate-TenantId-example]] .A `@TenantId` usage example ==== [source, JAVA, indent=0] @@ -128,7 +130,7 @@ The following example portrays a `MultiTenantConnectionProvider` implementation ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ConfigurableMultiTenantConnectionProvider.java[tags=multitenacy-hibernate-ConfigurableMultiTenantConnectionProvider-example] +include::{example-dir-multitenancy}/ConfigurableMultiTenantConnectionProvider.java[tags=multitenacy-hibernate-ConfigurableMultiTenantConnectionProvider-example] ---- ==== @@ -139,7 +141,7 @@ The `ConfigurableMultiTenantConnectionProvider` can be set up as follows: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-MultiTenantConnectionProvider-example] +include::{example-dir-multitenancy}/AbstractMultiTenancyTest.java[tags=multitenacy-hibernate-MultiTenantConnectionProvider-example] ---- ==== @@ -150,7 +152,7 @@ When using multitenancy, it's possible to save an entity with the same identifie ==== [source, JAVA, indent=0] ---- 
-include::{sourcedir}/AbstractMultiTenancyTest.java[tags=multitenacy-multitenacy-hibernate-same-entity-example] +include::{example-dir-multitenancy}/AbstractMultiTenancyTest.java[tags=multitenacy-multitenacy-hibernate-same-entity-example] ---- ==== @@ -196,7 +198,7 @@ For instance, each tenant could specify a different time zone configuration. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-registerConnectionProvider-call-example] +include::{example-dir-multitenancy}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-registerConnectionProvider-call-example] ---- ==== @@ -207,7 +209,7 @@ The `registerConnectionProvider` method is used to define the tenant-specific co ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-registerConnectionProvider-example] +include::{example-dir-multitenancy}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-registerConnectionProvider-example] ---- ==== @@ -215,7 +217,7 @@ For our example, the tenant-specific context is held in the `connectionProviderM [source, JAVA, indent=0] ---- -include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-context-example] +include::{example-dir-multitenancy}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-context-example] ---- Now, when building the Hibernate `Session`, aside from passing the tenant identifier, @@ -226,7 +228,7 @@ we could also configure the `Session` to use the tenant-specific time zone. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-session-example] +include::{example-dir-multitenancy}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-timezone-configuration-session-example] ---- ==== @@ -239,7 +241,7 @@ even if the currently running JVM uses a different time zone. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-applying-timezone-configuration-example] +include::{example-dir-multitenancy}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-applying-timezone-configuration-example] ---- ==== @@ -252,7 +254,7 @@ test output. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-not-applying-timezone-configuration-example] +include::{example-dir-multitenancy}/DatabaseTimeZoneMultiTenancyTest.java[tags=multitenacy-hibernate-not-applying-timezone-configuration-example] ---- [source, SQL,indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/pc/BytecodeEnhancement.adoc b/documentation/src/main/asciidoc/userguide/chapters/pc/BytecodeEnhancement.adoc index 406167271f38..d25b7ec4dc48 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/pc/BytecodeEnhancement.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/pc/BytecodeEnhancement.adoc @@ -1,6 +1,8 @@ [[BytecodeEnhancement]] === Bytecode Enhancement -:sourcedir: ../../../../../test/java/org/hibernate/userguide/pc +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-enhancement: {core-project-dir}/src/test/java/org/hibernate/orm/test/pc Hibernate "grew up" not supporting bytecode enhancement at all. At that time, Hibernate only supported proxy-based alternative for lazy loading and always used diff-based dirty calculation. 
@@ -29,7 +31,7 @@ This behavior is explicitly controllable through the `@org.hibernate.annotations ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-lazy-loading-example] +include::{example-dir-enhancement}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-lazy-loading-example] ---- ==== @@ -67,7 +69,7 @@ Consider a domain model with a normal `Person`/`Book` bidirectional association: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-dirty-tracking-bidirectional-example] +include::{example-dir-enhancement}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-dirty-tracking-bidirectional-example] ---- ==== @@ -76,7 +78,7 @@ include::{sourcedir}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-dirty ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-dirty-tracking-bidirectional-incorrect-usage-example] +include::{example-dir-enhancement}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-dirty-tracking-bidirectional-incorrect-usage-example] ---- ==== @@ -87,7 +89,7 @@ This blows up in normal Java usage. 
The correct normal Java usage is: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-dirty-tracking-bidirectional-correct-usage-example] +include::{example-dir-enhancement}/BytecodeEnhancementTest.java[tags=BytecodeEnhancement-dirty-tracking-bidirectional-correct-usage-example] ---- ==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/pc/PersistenceContext.adoc b/documentation/src/main/asciidoc/userguide/chapters/pc/PersistenceContext.adoc index c455168662c5..e59a75bf06ad 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/pc/PersistenceContext.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/pc/PersistenceContext.adoc @@ -1,7 +1,10 @@ [[pc]] == Persistence Context -:sourcedir: ../../../../../test/java/org/hibernate/userguide/pc -:sourcedir-caching: ../../../../../test/java/org/hibernate/userguide/caching +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-pc: {core-project-dir}/src/test/java/org/hibernate/orm/test/pc +:jcache-project-dir: {root-project-dir}/hibernate-jcache +:example-dir-caching: {jcache-project-dir}/src/test/java/org/hibernate/orm/test/caching :extrasdir: extras Both the `org.hibernate.Session` API and `jakarta.persistence.EntityManager` API represent a context for dealing with persistent data. 
@@ -27,7 +30,7 @@ Jakarta Persistence defines an incredibly useful method to allow applications ac ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-unwrap-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-unwrap-example] ---- ==== @@ -44,7 +47,7 @@ You can make it persistent by associating it to either an `org.hibernate.Session ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-persist-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-persist-jpa-example] ---- ==== @@ -53,7 +56,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-persist-jpa-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-persist-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-persist-native-example] ---- ==== @@ -73,7 +76,7 @@ Entities can also be deleted. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-remove-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-remove-jpa-example] ---- ==== @@ -82,7 +85,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-remove-jpa-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-remove-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-remove-native-example] ---- ==== @@ -106,7 +109,7 @@ The most common case being the need to create an association between an entity a ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-get-reference-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-get-reference-jpa-example] ---- ==== @@ -115,7 +118,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-get-reference-jpa-examp ==== [source, JAVA, indent=0] ---- 
-include::{sourcedir}/PersistenceContextTest.java[tags=pc-get-reference-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-get-reference-native-example] ---- ==== @@ -137,7 +140,7 @@ It is also quite common to want to obtain an entity along with its data (e.g. li ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-jpa-example] ---- ==== @@ -147,7 +150,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-jpa-example] [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-native-example] ---- ==== @@ -157,7 +160,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-native-example] [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-by-id-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-by-id-native-example] ---- ==== @@ -171,7 +174,7 @@ It's possible to return a Java 8 `Optional` as well: [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-optional-by-id-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-optional-by-id-native-example] ---- ==== @@ -241,7 +244,7 @@ as illustrated by the following example: [source, JAVA, indent=0] ---- -include::{sourcedir}/MultiLoadIdTest.java[tags=pc-by-multiple-ids-example] +include::{example-dir-pc}/MultiLoadIdTest.java[tags=pc-by-multiple-ids-example] ---- [source, SQL, indent=0] @@ -262,7 +265,7 @@ https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/ ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/MultiLoadIdTest.java[tags=pc-by-multiple-ids-second-level-cache-example] 
+include::{example-dir-pc}/MultiLoadIdTest.java[tags=pc-by-multiple-ids-second-level-cache-example] ---- ==== @@ -289,7 +292,7 @@ In addition to allowing to load the entity by its identifier, Hibernate allows a ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-by-natural-id-entity-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-by-natural-id-entity-example] ---- ==== @@ -300,7 +303,7 @@ We can also opt to fetch the entity or just retrieve a reference to it when usin ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-by-simple-natural-id-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-by-simple-natural-id-example] ---- ==== @@ -309,7 +312,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-by-simple-natural- ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-by-natural-id-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-by-natural-id-example] ---- ==== @@ -320,7 +323,7 @@ We can also use a Java 8 `Optional` to load an entity by its natural id: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-find-optional-by-simple-natural-id-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-find-optional-by-simple-natural-id-example] ---- ==== @@ -357,7 +360,7 @@ This can be achieved using the `@Where` annotation, which can be applied to enti ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/WhereTest.java[tags=pc-where-example] +include::{example-dir-pc}/WhereTest.java[tags=pc-where-example] ---- ==== @@ -368,7 +371,7 @@ If the database contains the following entities: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/WhereTest.java[tags=pc-where-persistence-example] +include::{example-dir-pc}/WhereTest.java[tags=pc-where-persistence-example] ---- [source, SQL, 
indent=0] @@ -384,7 +387,7 @@ When executing an `Account` entity query, Hibernate is going to filter out all r ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/WhereTest.java[tags=pc-where-entity-query-example] +include::{example-dir-pc}/WhereTest.java[tags=pc-where-entity-query-example] ---- [source, SQL, indent=0] @@ -400,7 +403,7 @@ When fetching the `debitAccounts` or the `creditAccounts` collections, Hibernate ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/WhereTest.java[tags=pc-where-collection-query-example] +include::{example-dir-pc}/WhereTest.java[tags=pc-where-collection-query-example] ---- [source, SQL, indent=0] @@ -419,7 +422,7 @@ Just like `@Where` annotation, `@WhereJoinTable` is used to filter out collectio ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/WhereJoinTableTest.java[tags=pc-where-join-table-example] +include::{example-dir-pc}/WhereJoinTableTest.java[tags=pc-where-join-table-example] ---- [source, SQL, indent=0] @@ -438,7 +441,7 @@ Considering that the following two `Book_Reader` entries are added into our syst ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/WhereJoinTableTest.java[tags=pc-where-join-table-persist-example] +include::{example-dir-pc}/WhereJoinTableTest.java[tags=pc-where-join-table-persist-example] ---- ==== @@ -449,7 +452,7 @@ When fetching the `currentWeekReaders` collection, Hibernate is going to find on ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/WhereJoinTableTest.java[tags=pc-where-join-table-fetch-example] +include::{example-dir-pc}/WhereJoinTableTest.java[tags=pc-where-join-table-fetch-example] ---- ==== @@ -466,7 +469,7 @@ Now, considering we have the following `Account` entity: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-filter-Account-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-filter-Account-example] ---- ==== @@ -484,7 +487,7 @@ As already explained, we can also apply the `@Filter` annotation for collections 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-filter-Client-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-filter-Client-example] ---- ==== @@ -496,7 +499,7 @@ Hibernate will execute the following SQL statements: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-filter-persistence-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-filter-persistence-example] ---- [source, SQL, indent=0] @@ -512,7 +515,7 @@ By default, without explicitly enabling the filter, Hibernate is going to fetch ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-no-filter-entity-query-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-no-filter-entity-query-example] ---- [source, SQL, indent=0] @@ -529,7 +532,7 @@ then Hibernate is going to apply the filtering criteria to the associated `Accou ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-filter-entity-query-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-filter-entity-query-example] ---- [source, SQL, indent=0] @@ -540,15 +543,15 @@ include::{extrasdir}/pc-filter-entity-query-example.sql[] [IMPORTANT] ==== -Filters apply to entity queries and to direct fetching. +Filters apply to entity queries, but not to direct fetching. -Therefore, even in the following example, the filter is taken into consideration when fetching an entity from the Persistence Context. +Therefore, in the following example, the filter is not taken into consideration when fetching an entity from the Persistence Context. 
[[pc-filter-entity-example]] .Fetching entities mapped with `@Filter` [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-filter-entity-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-filter-entity-example] ---- [source, SQL, indent=0] @@ -566,7 +569,7 @@ Just like with entity queries, collections can be filtered as well, but only if ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-no-filter-collection-query-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-no-filter-collection-query-example] ---- [source, SQL, indent=0] @@ -582,7 +585,7 @@ When activating the `@Filter` and fetching the `accounts` collections, Hibernate ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterTest.java[tags=pc-filter-collection-query-example] +include::{example-dir-pc}/FilterTest.java[tags=pc-filter-collection-query-example] ---- [source, SQL, indent=0] @@ -620,7 +623,7 @@ if the `@Filter` defines a condition that uses predicates across multiple tables ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterSqlFragementAliasTest.java[tags=pc-filter-sql-fragment-alias-example] +include::{example-dir-pc}/FilterSqlFragementAliasTest.java[tags=pc-filter-sql-fragment-alias-example] ---- ==== @@ -632,7 +635,7 @@ Hibernate is going to apply the right table aliases to the filter predicates: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterSqlFragementAliasTest.java[tags=pc-filter-sql-fragment-alias-query-example] +include::{example-dir-pc}/FilterSqlFragementAliasTest.java[tags=pc-filter-sql-fragment-alias-query-example] ---- [source, SQL, indent=0] @@ -654,7 +657,7 @@ The `@FilterJoinTable` annotation can be, therefore, applied to a unidirectional ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterJoinTableTest.java[tags=pc-filter-join-table-example] +include::{example-dir-pc}/FilterJoinTableTest.java[tags=pc-filter-join-table-example] ---- ==== @@ -669,7 +672,7 @@ Let's 
assume our database contains the following entities: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterJoinTableTest.java[tags=pc-filter-join-table-persistence-example] +include::{example-dir-pc}/FilterJoinTableTest.java[tags=pc-filter-join-table-persistence-example] ---- [source, SQL, indent=0] @@ -685,7 +688,7 @@ The collections can be filtered only if the associated filter is enabled on the ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterJoinTableTest.java[tags=pc-no-filter-join-table-collection-query-example] +include::{example-dir-pc}/FilterJoinTableTest.java[tags=pc-no-filter-join-table-collection-query-example] ---- [source, SQL, indent=0] @@ -702,7 +705,7 @@ If we enable the filter and set the `maxOrderId` to `1` when fetching the `accou ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/FilterJoinTableTest.java[tags=pc-filter-join-table-collection-query-example] +include::{example-dir-pc}/FilterJoinTableTest.java[tags=pc-filter-join-table-collection-query-example] ---- [source, SQL, indent=0] @@ -722,7 +725,7 @@ There is no need to call a particular method to make your modifications persiste ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-managed-state-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-managed-state-jpa-example] ---- ==== @@ -731,7 +734,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-managed-state-jpa-examp ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-managed-state-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-managed-state-native-example] ---- ==== @@ -744,7 +747,7 @@ Therefore, considering you have the following `Product` entity mapping: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/NoDynamicUpdateTest.java[tags=pc-managed-state-update-mapping-example] 
+include::{example-dir-pc}/NoDynamicUpdateTest.java[tags=pc-managed-state-update-mapping-example] ---- ==== @@ -755,7 +758,7 @@ If you persist the following `Product` entity: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/NoDynamicUpdateTest.java[tags=pc-managed-state-update-persist-example] +include::{example-dir-pc}/NoDynamicUpdateTest.java[tags=pc-managed-state-update-persist-example] ---- ==== @@ -766,7 +769,7 @@ When you modify the `Product` entity, Hibernate generates the following SQL UPDA ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/NoDynamicUpdateTest.java[tags=pc-managed-state-update-example] +include::{example-dir-pc}/NoDynamicUpdateTest.java[tags=pc-managed-state-update-example] ---- [source, SQL, indent=0] @@ -795,7 +798,7 @@ To enable dynamic updates, you need to annotate the entity with the `@DynamicUpd ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/DynamicUpdateTest.java[tags=pc-managed-state-dynamic-update-mapping-example] +include::{example-dir-pc}/DynamicUpdateTest.java[tags=pc-managed-state-dynamic-update-mapping-example] ---- ==== @@ -822,7 +825,7 @@ You can reload an entity instance and its collections at any time. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-refresh-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-refresh-jpa-example] ---- ==== @@ -831,7 +834,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-refresh-jpa-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-refresh-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-refresh-native-example] ---- ==== @@ -876,7 +879,7 @@ For instance, consider the following example: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-refresh-child-entity-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-refresh-child-entity-jpa-example] ---- ==== @@ -912,7 +915,7 @@ Jakarta Persistence does not support reattaching detached data. This is only ava ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-detach-reattach-lock-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-detach-reattach-lock-example] ---- ==== @@ -921,7 +924,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-detach-reattach-lock-ex ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-detach-reattach-saveOrUpdate-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-detach-reattach-saveOrUpdate-example] ---- ==== @@ -947,7 +950,7 @@ Although not exactly per se, the following example is a good visualization of th ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-merge-visualize-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-merge-visualize-example] ---- ==== @@ -956,7 +959,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-merge-visualize-example ==== [source, JAVA, indent=0] ---- 
-include::{sourcedir}/PersistenceContextTest.java[tags=pc-merge-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-merge-jpa-example] ---- ==== @@ -965,7 +968,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-merge-jpa-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-merge-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-merge-native-example] ---- ==== @@ -1038,7 +1041,7 @@ An application can verify the state of entities and collections in relation to t ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-contains-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-contains-jpa-example] ---- ==== @@ -1047,7 +1050,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-contains-jpa-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-contains-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-contains-native-example] ---- ==== @@ -1056,7 +1059,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-contains-native-example ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-verify-lazy-jpa-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-verify-lazy-jpa-example] ---- ==== @@ -1065,7 +1068,7 @@ include::{sourcedir}/PersistenceContextTest.java[tags=pc-verify-lazy-jpa-example ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-verify-lazy-native-example] +include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-verify-lazy-native-example] ---- ==== @@ -1076,7 +1079,7 @@ In Jakarta Persistence there is an alternative means to check laziness using the ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersistenceContextTest.java[tags=pc-verify-lazy-jpa-alternative-example] 
+include::{example-dir-pc}/PersistenceContextTest.java[tags=pc-verify-lazy-jpa-alternative-example] ---- ==== @@ -1091,7 +1094,7 @@ the `evict()` method can be used to remove the object and its collections from t ==== [source, JAVA, indent=0] ---- -include::{sourcedir-caching}/FirstLevelCacheTest.java[tags=caching-management-jpa-detach-example] +include::{example-dir-caching}/FirstLevelCacheTest.java[tags=caching-management-jpa-detach-example] ---- ==== @@ -1100,7 +1103,7 @@ include::{sourcedir-caching}/FirstLevelCacheTest.java[tags=caching-management-jp ==== [source, JAVA, indent=0] ---- -include::{sourcedir-caching}/FirstLevelCacheTest.java[tags=caching-management-native-evict-example] +include::{example-dir-caching}/FirstLevelCacheTest.java[tags=caching-management-native-evict-example] ---- ==== @@ -1111,7 +1114,7 @@ To detach all entities from the current persistence context, both the `EntityMan ==== [source, JAVA, indent=0] ---- -include::{sourcedir-caching}/FirstLevelCacheTest.java[tags=caching-management-clear-example] +include::{example-dir-caching}/FirstLevelCacheTest.java[tags=caching-management-clear-example] ---- ==== @@ -1122,7 +1125,7 @@ To verify if an entity instance is currently attached to the running persistence ==== [source, JAVA, indent=0] ---- -include::{sourcedir-caching}/FirstLevelCacheTest.java[tags=caching-management-contains-example] +include::{example-dir-caching}/FirstLevelCacheTest.java[tags=caching-management-contains-example] ---- ==== @@ -1149,9 +1152,9 @@ The following examples will explain some of the aforementioned cascade operation [source, JAVA, indent=0] ---- -include::{sourcedir}/Person.java[tags=pc-cascade-domain-model-example] +include::{example-dir-pc}/Person.java[tags=pc-cascade-domain-model-example] -include::{sourcedir}/Phone.java[tags=pc-cascade-domain-model-example] +include::{example-dir-pc}/Phone.java[tags=pc-cascade-domain-model-example] ---- [[pc-cascade-persist]] @@ -1164,7 +1167,7 @@ The `CascadeType.PERSIST` 
allows us to persist a child entity along with the par ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadePersistTest.java[tags=pc-cascade-persist-example] +include::{example-dir-pc}/CascadePersistTest.java[tags=pc-cascade-persist-example] ---- [source, SQL, indent=0] @@ -1185,7 +1188,7 @@ The `CascadeType.MERGE` allows us to merge a child entity along with the parent ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeMergeTest.java[tags=pc-cascade-merge-example] +include::{example-dir-pc}/CascadeMergeTest.java[tags=pc-cascade-merge-example] ---- [source, SQL, indent=0] @@ -1209,7 +1212,7 @@ However, `CascadeType.REMOVE` and `org.hibernate.annotations.CascadeType.DELETE` ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeRemoveTest.java[tags=pc-cascade-remove-example] +include::{example-dir-pc}/CascadeRemoveTest.java[tags=pc-cascade-remove-example] ---- [source, SQL, indent=0] @@ -1228,7 +1231,7 @@ include::{extrasdir}/pc-cascade-remove-example.sql[] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeDetachTest.java[tags=pc-cascade-detach-example] +include::{example-dir-pc}/CascadeDetachTest.java[tags=pc-cascade-detach-example] ---- ==== @@ -1245,7 +1248,7 @@ However, `CascadeType.LOCK` allows us to reattach a parent entity along with its ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeLockTest.java[tags=pc-cascade-lock-example] +include::{example-dir-pc}/CascadeLockTest.java[tags=pc-cascade-lock-example] ---- ==== @@ -1260,7 +1263,7 @@ The refresh operation will discard the current entity state, and it will overrid ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeRefreshTest.java[tags=pc-cascade-refresh-example] +include::{example-dir-pc}/CascadeRefreshTest.java[tags=pc-cascade-refresh-example] ---- [source, SQL, indent=0] @@ -1282,7 +1285,7 @@ The replicate operation allows you to synchronize entities coming from different ==== [source, JAVA, indent=0] ---- 
-include::{sourcedir}/CascadeReplicateTest.java[tags=pc-cascade-replicate-example] +include::{example-dir-pc}/CascadeReplicateTest.java[tags=pc-cascade-replicate-example] ---- [source, SQL, indent=0] @@ -1308,12 +1311,12 @@ as illustrated by the following example. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-mapping-Person-example] +include::{example-dir-pc}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-mapping-Person-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-mapping-Phone-example] +include::{example-dir-pc}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-mapping-Phone-example] ---- [source, SQL, indent=0] @@ -1329,7 +1332,7 @@ Now, you can just remove the `Person` entity, and the associated `Phone` entitie ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-example] +include::{example-dir-pc}/CascadeOnDeleteTest.java[tags=pc-cascade-on-delete-example] ---- [source, SQL, indent=0] @@ -1346,12 +1349,12 @@ illustrated in the following example. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-mapping-Person-example] +include::{example-dir-pc}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-mapping-Person-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-mapping-Phone-example] +include::{example-dir-pc}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-mapping-Phone-example] ---- ==== @@ -1362,7 +1365,7 @@ Now, when removing the `Person` entity, all the associated `Phone` child entitie ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-example] +include::{example-dir-pc}/CascadeOnDeleteCollectionTest.java[tags=pc-cascade-on-delete-collection-example] ---- [source, SQL, indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/query/criteria/Criteria.adoc b/documentation/src/main/asciidoc/userguide/chapters/query/criteria/Criteria.adoc index d039a026e814..ecb8037c56ab 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/query/criteria/Criteria.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/query/criteria/Criteria.adoc @@ -1,6 +1,8 @@ [[criteria]] == Criteria -:sourcedir: ../../../../../../test/java/org/hibernate/userguide/criteria +:root-project-dir: ../../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-criteria: {core-project-dir}/src/test/java/org/hibernate/orm/test/query/criteria Criteria queries offer a type-safe alternative to HQL, JPQL and native SQL queries. @@ -49,7 +51,7 @@ The application wants to select entity instances. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-typedquery-entity-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-typedquery-entity-example] ---- ==== @@ -77,7 +79,7 @@ But this expression might also represent an aggregation, a mathematical operatio ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-typedquery-expression-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-typedquery-expression-example] ---- ==== @@ -97,7 +99,7 @@ or consider a wrapper query, see <> for details. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-typedquery-multiselect-array-explicit-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-typedquery-multiselect-array-explicit-example] ---- ==== @@ -111,7 +113,7 @@ The example then uses the array method of `jakarta.persistence.criteria.Criteria ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-typedquery-multiselect-array-implicit-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-typedquery-multiselect-array-implicit-example] ---- ==== @@ -131,9 +133,9 @@ Going back to the example query there, rather than returning an array of _[Perso ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/PersonWrapper.java[tags=criteria-typedquery-wrapper-example, indent=0] +include::{example-dir-criteria}/PersonWrapper.java[tags=criteria-typedquery-wrapper-example, indent=0] -include::{sourcedir}/CriteriaTest.java[tags=criteria-typedquery-wrapper-example, indent=0] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-typedquery-wrapper-example, indent=0] ---- ==== @@ -155,7 +157,7 @@ A better approach to <> is to use either a wrap ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-tuple-example] 
+include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-tuple-example] ---- ==== @@ -218,7 +220,7 @@ A root is always an entity type. Roots are defined and added to the criteria by ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-from-root-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-from-root-example] ---- ==== @@ -230,7 +232,7 @@ Here is an example defining a Cartesian Product between `Person` and `Partner` e ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-from-multiple-root-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-from-multiple-root-example] ---- ==== @@ -245,7 +247,7 @@ Joins are created by the numerous overloaded __join__ methods of the `jakarta.pe ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-from-join-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-from-join-example] ---- ==== @@ -260,7 +262,7 @@ Fetches are created by the numerous overloaded __fetch__ methods of the `jakarta ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-from-fetch-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-from-fetch-example] ---- ==== @@ -286,7 +288,7 @@ Roots, joins and fetches are themselves path expressions as well. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-param-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-param-example] ---- ==== @@ -301,6 +303,6 @@ Then use the parameter reference to bind the parameter value to the `jakarta.per ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CriteriaTest.java[tags=criteria-group-by-example] +include::{example-dir-criteria}/CriteriaTest.java[tags=criteria-group-by-example] ---- ==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/query/hql/Query.adoc b/documentation/src/main/asciidoc/userguide/chapters/query/hql/Query.adoc index 4dda3dcc3256..d009bab1c41a 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/query/hql/Query.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/query/hql/Query.adoc @@ -1,7 +1,10 @@ [[hql]] == Java API for HQL and JPQL -:modeldir: ../../../../../../main/java/org/hibernate/userguide/model -:sourcedir: ../../../../../../test/java/org/hibernate/userguide/hql +:root-project-dir: ../../../../../../../.. +:testing-project-dir: {root-project-dir}/hibernate-testing +:example-dir-model: {testing-project-dir}/src/main/java/org/hibernate/testing/orm/domain/userguide +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-query: {core-project-dir}/src/test/java/org/hibernate/orm/test/hql :extrasdir: extras The Hibernate Query Language (HQL) and the Java Persistence Query Language (JPQL) are object-oriented query languages based on SQL and very similar in flavor to SQL. 
@@ -37,23 +40,23 @@ The code examples featured in this chapter, and the next, make use of the follow ==== [source, JAVA, indent=0] ---- -include::{modeldir}/Person.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/Person.java[tags=hql-examples-domain-model-example] -include::{modeldir}/AddressType.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/AddressType.java[tags=hql-examples-domain-model-example] -include::{modeldir}/Partner.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/Partner.java[tags=hql-examples-domain-model-example] -include::{modeldir}/Phone.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/Phone.java[tags=hql-examples-domain-model-example] -include::{modeldir}/PhoneType.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/PhoneType.java[tags=hql-examples-domain-model-example] -include::{modeldir}/Call.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/Call.java[tags=hql-examples-domain-model-example] -include::{modeldir}/Payment.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/Payment.java[tags=hql-examples-domain-model-example] -include::{modeldir}/CreditCardPayment.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/CreditCardPayment.java[tags=hql-examples-domain-model-example] -include::{modeldir}/WireTransferPayment.java[tags=hql-examples-domain-model-example] +include::{example-dir-model}/WireTransferPayment.java[tags=hql-examples-domain-model-example] ---- ==== @@ -79,7 +82,7 @@ Named queries may be defined using the Jakarta Persistence annotation `@NamedQue ==== [source, JAVA, indent=0] ---- -include::{modeldir}/Person.java[tags=jpa-read-only-entities-native-example] +include::{example-dir-model}/Person.java[tags=jpa-read-only-entities-native-example] ---- ==== @@ -92,7 +95,7 @@ which allows the specification of additional properties of the query, 
including ==== [source, JAVA, indent=0] ---- -include::{modeldir}/Phone.java[tags=jpql-api-hibernate-named-query-example, indent=0] +include::{example-dir-model}/Phone.java[tags=jpql-api-hibernate-named-query-example, indent=0] ---- //include::{sourcedir}/HQLTest.java[tags=jpql-api-hibernate-named-query-example, indent=0] ==== @@ -126,7 +129,7 @@ That way, you'll obtain a `TypedQuery`, and avoid some later typecasting. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-example] ---- ==== @@ -135,9 +138,9 @@ include::{sourcedir}/HQLTest.java[tags=jpql-api-example] ==== [source, JAVA, indent=0] ---- -include::{modeldir}/Person.java[tags=jpql-api-named-query-example, indent=0] +include::{example-dir-model}/Person.java[tags=jpql-api-named-query-example, indent=0] -include::{sourcedir}/HQLTest.java[tags=jpql-api-named-query-example, indent=0] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-named-query-example, indent=0] ---- ==== @@ -161,7 +164,7 @@ Hibernate's `Query` interface offers additional operations not available via `Ty ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-api-example] +include::{example-dir-query}/HQLTest.java[tags=hql-api-example] ---- ==== @@ -170,7 +173,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-api-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-api-named-query-example] +include::{example-dir-query}/HQLTest.java[tags=hql-api-named-query-example] ---- ==== @@ -194,7 +197,7 @@ If the query has parameters, arguments must be bound to each parameter before th ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-parameter-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-parameter-example] ---- ==== @@ -206,7 +209,7 @@ Just like with named parameters, a ordinal parameter may appear multiple times i ==== [source, JAVA, 
indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-ordinal-parameter-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-ordinal-parameter-example] ---- ==== @@ -230,7 +233,7 @@ The `Query` interface is used to control the execution of the query. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-list-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-list-example] ---- ==== @@ -239,7 +242,7 @@ include::{sourcedir}/HQLTest.java[tags=jpql-api-list-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-single-result-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-single-result-example] ---- ==== @@ -248,7 +251,7 @@ include::{sourcedir}/HQLTest.java[tags=jpql-api-single-result-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-stream-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-stream-example] ---- ==== @@ -268,7 +271,7 @@ The very important methods `Query#setMaxResults()` and `Query#setFirstResult()` ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-basic-usage-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-basic-usage-example] ---- ==== @@ -283,7 +286,7 @@ For example, we may want to specify an execution timeout or control caching. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-api-hint-usage-example] +include::{example-dir-query}/HQLTest.java[tags=jpql-api-hint-usage-example] ---- ==== @@ -352,7 +355,7 @@ For complete details, see the https://docs.jboss.org/hibernate/orm/{majorMinorVe ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-api-basic-usage-example] +include::{example-dir-query}/HQLTest.java[tags=hql-api-basic-usage-example] ---- ==== @@ -361,17 +364,6 @@ include::{sourcedir}/HQLTest.java[tags=hql-api-basic-usage-example] A program may hook into the process of building the query results by providing a `org.hibernate.transform.ResultListTransformer` or `org.hibernate.transform.TupleTransformer`. -Hibernate provides several some built-in implementations of these interfaces, for example: - -[[hql-api-result-transformers-example]] -.Using a `ResultListTransformer` -==== -[source, JAVA, indent=0] ----- -include::{sourcedir}/SelectDistinctTest.java[tags=hql-distinct-entity-resulttransformer-example] ----- -==== - See the https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/transform/ResultListTransformer.html[Javadocs] along with the built-in implementations for additional details. 
//[[hql-api-parameters]] @@ -470,7 +462,7 @@ Read-only entities are skipped by the dirty checking mechanism as illustrated by ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-read-only-entities-example] +include::{example-dir-query}/HQLTest.java[tags=hql-read-only-entities-example] ---- [source, SQL, indent=0] @@ -488,7 +480,7 @@ The method `Query#setReadOnly()` is an alternative to using a Jakarta Persistenc ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-read-only-entities-native-example] +include::{example-dir-query}/HQLTest.java[tags=hql-read-only-entities-native-example] ---- ==== @@ -508,7 +500,7 @@ Depending on the specified `ScrollMode`, and on the capabilities of the JDBC dri ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-api-scroll-example] +include::{example-dir-query}/HQLTest.java[tags=hql-api-scroll-example] ---- ==== @@ -541,7 +533,7 @@ For that, use `getResultList().stream()`. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-api-stream-projection-example] +include::{example-dir-query}/HQLTest.java[tags=hql-api-stream-projection-example] ---- ==== @@ -550,7 +542,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-api-stream-projection-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-api-stream-example] +include::{example-dir-query}/HQLTest.java[tags=hql-api-stream-example] ---- ==== @@ -593,9 +585,9 @@ it does not expose a `#executeUpdate` method. 
This allows for earlier validatio ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SelectionQueryExampleTests.java[tags=example-hql-selection-query] +include::{example-dir-query}/SelectionQueryExampleTests.java[tags=example-hql-selection-query] -include::{sourcedir}/SelectionQueryExampleTests.java[tags=example-hql-selection-query-query] +include::{example-dir-query}/SelectionQueryExampleTests.java[tags=example-hql-selection-query-query] ---- ==== @@ -606,9 +598,9 @@ include::{sourcedir}/SelectionQueryExampleTests.java[tags=example-hql-selection- ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SelectionQueryExampleTests.java[tags=example-hql-named-selection-query] +include::{example-dir-query}/SelectionQueryExampleTests.java[tags=example-hql-named-selection-query] -include::{sourcedir}/SelectionQueryExampleTests.java[tags=example-hql-named-selection-query-query] +include::{example-dir-query}/SelectionQueryExampleTests.java[tags=example-hql-named-selection-query-query] ---- ==== @@ -626,9 +618,9 @@ For example, in terms of execution, it only exposes `#executeUpdate` method. 
Th ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/MutationQueryExampleTests.java[tags=example-hql-mutation-query] +include::{example-dir-query}/MutationQueryExampleTests.java[tags=example-hql-mutation-query] -include::{sourcedir}/MutationQueryExampleTests.java[tags=example-hql-mutation-query-query] +include::{example-dir-query}/MutationQueryExampleTests.java[tags=example-hql-mutation-query-query] ---- ==== @@ -640,9 +632,9 @@ include::{sourcedir}/MutationQueryExampleTests.java[tags=example-hql-mutation-qu ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/MutationQueryExampleTests.java[tags=example-hql-named-mutation-query] +include::{example-dir-query}/MutationQueryExampleTests.java[tags=example-hql-named-mutation-query] -include::{sourcedir}/MutationQueryExampleTests.java[tags=example-hql-named-mutation-query-query] +include::{example-dir-query}/MutationQueryExampleTests.java[tags=example-hql-named-mutation-query-query] ---- ==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/query/hql/QueryLanguage.adoc b/documentation/src/main/asciidoc/userguide/chapters/query/hql/QueryLanguage.adoc index 9a2aafbd2dd7..26ddddb338cb 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/query/hql/QueryLanguage.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/query/hql/QueryLanguage.adoc @@ -1,7 +1,10 @@ [[query-language]] == Hibernate Query Language -:modeldir: ../../../../../../main/java/org/hibernate/userguide/model -:sourcedir: ../../../../../../test/java/org/hibernate/userguide/hql +:root-project-dir: ../../../../../../../.. 
+:testing-project-dir: {root-project-dir}/hibernate-testing +:example-dir-model: {testing-project-dir}/src/main/java/org/hibernate/testing/orm/domain/userguide +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-hql: {core-project-dir}/src/test/java/org/hibernate/orm/test/hql :extrasdir: extras This chapter describes Hibernate Query Language (HQL) and Jakarta Persistence Query Language (JPQL). @@ -107,7 +110,7 @@ For example, the simplest query in HQL has no `select` clause at all: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-simplest-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-simplest-example] ---- ==== @@ -121,14 +124,14 @@ Naturally, the previous query may be written with a `select` clause: [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-simplest-jpql-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-simplest-jpql-example] ---- When there's no explicit `select` clause, the select list is implied by the result type of the query: [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-no-from] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-no-from] ---- For complicated queries, it's probably best to explicitly specify a `select` list. @@ -140,7 +143,7 @@ An alternative "simplest" query has _only_ a `select` list: ==== [source, SQL, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-simplest-example-alt] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-simplest-example-alt] ---- ==== @@ -155,7 +158,7 @@ But it's more natural to put it last: [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-last-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-last-example] ---- This form of the query is more readable, because the alias is declared _before_ it's used, just as God and nature intended. 
@@ -182,7 +185,7 @@ For example: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-update-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-update-example] ---- ==== @@ -194,9 +197,9 @@ A single HQL `update` statement might result in multiple SQL update statements e ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/../batch/BatchTest.java[tags=batch-bulk-jpql-update-example] +include::{example-dir-hql}/../batch/BatchTests.java[tags=batch-bulk-jpql-update-example] -include::{sourcedir}/../batch/BatchTest.java[tags=batch-bulk-hql-update-example] +include::{example-dir-hql}/../batch/BatchTests.java[tags=batch-bulk-hql-update-example] ---- ==== @@ -222,7 +225,7 @@ Adding the keyword `versioned`—writing `update versioned`—specifies ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/../batch/BatchTest.java[tags=batch-bulk-hql-update-version-example] +include::{example-dir-hql}/../batch/BatchTests.java[tags=batch-bulk-hql-update-version-example] ---- ==== @@ -288,12 +291,12 @@ For example: ==== [source, SQL, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-insert-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-insert-example] ---- [source, SQL, indent=0] ---- -include::{sourcedir}/../batch/BatchTest.java[tags=batch-bulk-hql-insert-example] +include::{example-dir-hql}/../batch/BatchTests.java[tags=batch-bulk-hql-insert-example] ---- ==== @@ -357,7 +360,7 @@ To escape a single quote within a string literal, use a doubled single quote: `' ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-string-literals-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-string-literals-example] ---- ==== @@ -371,7 +374,7 @@ Numeric literals come in several different forms. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-numeric-literals-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-numeric-literals-example] ---- ==== @@ -472,7 +475,7 @@ Literal values of a Java enumerated type may be written without needing to speci ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-enum-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-enum-example] ---- ==== @@ -487,7 +490,7 @@ HQL allows any Java `static` constant to be used in HQL, but it must be referenc ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-java-constant-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-java-constant-example] ---- ==== @@ -509,14 +512,14 @@ HQL defines two ways to concatenate strings: * the SQL-style concatenation operator, `||`, and * the JPQL-standard `concat()` function. -See <> for details of the `concat()` function. +See <> for details of the `concat()` function. [[hql-concatenation-example]] //.Concatenation operation example ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-concatenation-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-concatenation-example] ---- ==== @@ -532,7 +535,7 @@ The basic SQL arithmetic operators, `+`,`-`,`*`, and `/` are joined by the remai ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-numeric-arithmetic-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-numeric-arithmetic-example] ---- ==== @@ -615,7 +618,7 @@ For example: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-simple-case-expressions-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-simple-case-expressions-example] ---- ==== @@ -637,7 +640,7 @@ For example: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-searched-case-expressions-example] 
+include::{example-dir-hql}/HQLTest.java[tags=hql-searched-case-expressions-example] ---- ==== @@ -648,7 +651,7 @@ A `case` expression may contain complex expression, including operator expressio ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-case-arithmetic-expressions-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-case-arithmetic-expressions-example] ---- ==== @@ -703,7 +706,7 @@ This is mainly useful when dealing with entity inheritance hierarchies. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-entity-type-exp-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-entity-type-exp-example] ---- ==== @@ -717,7 +720,7 @@ This is useful when dealing with entity inheritance hierarchies. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-treat-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-treat-example] ---- ==== @@ -738,7 +741,7 @@ The target type is an unqualified Java class name: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-cast-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-cast-function-example] ---- ==== @@ -750,7 +753,7 @@ The function `str(x)` is a synonym for `cast(x as String)`. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-str-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-str-function-example] ---- ==== @@ -775,7 +778,7 @@ An abbreviated `case` expression that returns the first non-null operand. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-coalesce-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-coalesce-example] ---- ==== @@ -791,20 +794,21 @@ Evaluates to null if its operands are equal, or to its first argument otherwise. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-nullif-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-nullif-example] ---- ==== [[hql-functions-datetime]] ==== Functions for working with dates and times -There are two very important function for working with dates and times. +There are some very important functions for working with dates and times. |=== | Special function | Purpose | Signature | JPA standard | `extract()` | Extract a datetime field | `extract(field from x)` | ✓ | `format()` | Format a datetime as a string | `format(datetime as pattern)` | ✗ +| `trunc()` or `truncate()` | Datetime truncation | `truncate(datetime, field)` | ✗ |=== [[hql-function-extract]] @@ -812,13 +816,13 @@ There are two very important function for working with dates and times. The special function `extract()` obtains a single field of a date, time, or datetime. -Field types include: `day`, `month`, `year`, `second`, `minute`, `hour`, `day of week`, `day of month`, `week of year`, `date`, `time` and more. +Field types include: `day`, `month`, `year`, `second`, `minute`, `hour`, `day of week`, `day of month`, `week of year`, `date`, `time`, `epoch` and more. For a full list of field types, see the Javadoc for https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/query/TemporalUnit.html[`TemporalUnit`]. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-extract-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-extract-function-example] ---- ==== @@ -840,7 +844,7 @@ TIP: These abbreviations aren't part of the JPQL standard, but on the other hand ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-year-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-year-function-example] ---- ==== @@ -853,6 +857,16 @@ The syntax is `format(datetime as pattern)`, and the pattern must be written in For a full list of `format()` pattern elements, see the Javadoc for https://docs.jboss.org/hibernate/orm/{majorMinorVersion}/javadocs/org/hibernate/dialect/Dialect.html#appendDatetimeFormat[`Dialect#appendDatetimeFormat`]. +[[hql-function-trunc-datetime]] +===== `trunc()` or `truncate()` + +This function truncates a date, time, or datetime to the temporal unit specified by field. + +The syntax is `truncate(datetime, field)`. Supported temporal units are: `year`, `month`, `day`, `hour`, `minute` or `second`. + +Truncating a date, time or datetime value translates to obtaining a value of the same type in which all temporal units smaller than `field` have been pruned. +For hours, minutes and second this means setting them to `00`. For months and days, this means setting them to `01`. 
+ [[hql-string-functions]] ==== Functions for working with strings @@ -892,7 +906,7 @@ Accepts a variable number of arguments, and produces a string by concatenating t ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-concat-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-concat-function-example] ---- ==== @@ -904,7 +918,7 @@ The JPQL function `locate()` determines the position of a substring within anoth ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-locate-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-locate-function-example] ---- ==== @@ -915,7 +929,7 @@ The `position()` function has a similar purpose, but follows the ANSI SQL syntax ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-position-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-position-function-example] ---- ==== @@ -928,7 +942,7 @@ Returns a substring of the given string. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-substring-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-substring-function-example] ---- ==== @@ -939,7 +953,7 @@ It may be used to trim `leading` characters, `trailing` characters, or both. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-trim-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-trim-function-example] ---- ==== @@ -1015,15 +1029,15 @@ Of course, we also have a number of functions for working with numeric values. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-abs-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-abs-function-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-mod-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-mod-function-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-sqrt-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-sqrt-function-example] ---- ==== @@ -1057,7 +1071,7 @@ The number of elements of a collection or to-many association. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-size-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-size-example] ---- ==== @@ -1124,7 +1138,7 @@ Then at startup Hibernate will log a list of type signatures of all registered f ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-native-function-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-native-function-example] ---- ==== @@ -1170,7 +1184,7 @@ The operands should be of the same type. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-relational-comparisons-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-relational-comparisons-example] ---- ==== @@ -1186,7 +1200,7 @@ Of course, all three operands must be of compatible type. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-between-predicate-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-between-predicate-example] ---- ==== @@ -1207,7 +1221,7 @@ The following operators make it easier to deal with null values. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-null-predicate-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-null-predicate-example] ---- ==== @@ -1236,7 +1250,7 @@ The expression on the right is a pattern, where: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-like-predicate-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-like-predicate-example] ---- ==== @@ -1249,7 +1263,7 @@ For example, to match all stored procedures prefixed with `Dr_`, the like criter ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-like-predicate-escape-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-like-predicate-escape-example] ---- ==== @@ -1317,7 +1331,7 @@ Even embedded attributes are allowed, although that feature depends on the level ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-in-predicate-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-in-predicate-example] ---- ==== @@ -1326,7 +1340,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-in-predicate-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-collection-expressions-in-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-expressions-in-example] ---- ==== @@ -1352,7 +1366,7 @@ The qualifiers are unary prefix operators: `all`, `every`, `any`, and `some`. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-all-subquery-comparison-qualifier-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-all-subquery-comparison-qualifier-example] ---- ==== @@ -1361,12 +1375,12 @@ include::{sourcedir}/HQLTest.java[tags=hql-all-subquery-comparison-qualifier-exa ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-collection-expressions-all-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-expressions-all-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-collection-expressions-some-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-expressions-some-example] ---- ==== @@ -1387,7 +1401,7 @@ As you can surely guess, `not exists` evaluates to true if the thing to the righ ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-collection-expressions-exists-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-expressions-exists-example] ---- ==== @@ -1408,7 +1422,7 @@ The following operators apply to collection-valued attributes and to-many associ ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-empty-collection-predicate-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-empty-collection-predicate-example] ---- ==== @@ -1417,7 +1431,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-empty-collection-predicate-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-member-of-collection-predicate-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-member-of-collection-predicate-example] ---- ==== @@ -1463,7 +1477,7 @@ Remember, the _entity name_ is the value of the `name` member of the `@Entity` a ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-simplest-jpql-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-simplest-jpql-example] ---- ==== 
@@ -1477,7 +1491,7 @@ Then Hibernate will query every entity which inherits the named type. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-simplest-jpql-fqn-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-simplest-jpql-fqn-example] ---- ==== @@ -1488,12 +1502,12 @@ Of course, there may be multiple root entities. ==== [source, SQL, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-multiple-root-reference-jpql-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-multiple-root-reference-jpql-example] ---- [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-multiple-same-root-reference-jpql-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-multiple-same-root-reference-jpql-example] ---- ==== @@ -1504,7 +1518,7 @@ The previous queries may even be written using the syntax `cross join` in place ==== [source, SQL, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-cross-join-jpql-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-cross-join-jpql-example] ---- ==== @@ -1518,7 +1532,7 @@ Consider: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-polymorphism-example, indent=0] +include::{example-dir-hql}/HQLTest.java[tags=hql-polymorphism-example, indent=0] ---- ==== @@ -1547,7 +1561,7 @@ It must declare an identification variable. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-derived-root-example, indent=0] +include::{example-dir-hql}/HQLTest.java[tags=hql-derived-root-example, indent=0] ---- ==== @@ -1597,7 +1611,7 @@ An explicit root join works just like an ANSI-style join in SQL. ==== [source, SQL, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-explicit-root-join-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-explicit-root-join-example] ---- ==== @@ -1627,7 +1641,7 @@ An explicit join may assign an identification variable to the joined entity. 
==== [source, SQL, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-explicit-inner-join-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-explicit-inner-join-example] ---- ==== @@ -1636,7 +1650,7 @@ include::{sourcedir}/HQLTest.java[tags=hql-explicit-inner-join-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-explicit-outer-join-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-explicit-outer-join-example] ---- ==== @@ -1663,7 +1677,7 @@ Join conditions occurring in the `with` or `on` clause are added to the `on` cla ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-explicit-join-with-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-explicit-join-with-example] ---- ==== @@ -1674,7 +1688,7 @@ The following query is arguably less clear, but semantically equivalent: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-explicit-join-jpql-on-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-explicit-join-jpql-on-example] ---- ==== @@ -1701,7 +1715,7 @@ For example, if `Person` has a one-to-many association named `phones`, the use o ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-explicit-fetch-join-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-explicit-fetch-join-example] ---- ==== @@ -1738,7 +1752,7 @@ An explicit join may narrow the type of the joined entity using `treat()`. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-join-treat-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-join-treat-example] ---- ==== @@ -1761,7 +1775,7 @@ The `lateral` keyword just distinguishes the two cases. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-derived-join-example, indent=0] +include::{example-dir-hql}/HQLTest.java[tags=hql-derived-join-example, indent=0] ---- ==== @@ -1808,7 +1822,7 @@ In the second case, Hibernate with automatically add a join to the generated SQL ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-implicit-join-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-implicit-join-example] ---- ==== @@ -1825,7 +1839,7 @@ Note that: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-implicit-join-alias-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-implicit-join-alias-example] ---- ==== @@ -1843,7 +1857,7 @@ When a join involves a collection or many-valued association, the declared ident ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-collection-valued-associations] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-valued-associations] ---- ==== @@ -1872,9 +1886,9 @@ In particular, `index()` and `key()` obtain a reference to a list index or map k ==== [source, JAVA, indent=0] ---- -include::{modeldir}/Phone.java[tags=hql-collection-qualification-example, indent=0] +include::{example-dir-model}/Phone.java[tags=hql-collection-qualification-example, indent=0] -include::{sourcedir}/HQLTest.java[tags=hql-collection-qualification-example, indent=0] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-qualification-example, indent=0] ---- ==== @@ -1887,7 +1901,7 @@ The functions `element()`, `index()`, `key()`, and `value()` may even be applied ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-collection-implicit-join-example, indent=0] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-implicit-join-example, indent=0] ---- ==== @@ -1898,7 +1912,7 @@ An element of an indexed collection (an array, list, or map) may even be identif ==== [source, JAVA, indent=0] 
---- -include::{sourcedir}/HQLTest.java[tags=hql-collection-index-operator-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-collection-index-operator-example] ---- ==== @@ -1924,14 +1938,14 @@ There's no need to bother with trying to represent a "tuple of length 1". But if there are multiple expressions in the select list then: - by default, each query result is packaged as an array of type `Object[]`, or -- if explicitly requested by passing the class `Tuple` to `createQuery()`, the query result is packaged as an instance of `javax.persistence.Tuple`. +- if explicitly requested by passing the class `Tuple` to `createQuery()`, the query result is packaged as an instance of `jakarta.persistence.Tuple`. [[hql-select-clause-projection-example]] //.Query results as lists ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=jpql-projection-example] +include::{example-dir-hql}/HQLTest.java[tags=jpql-projection-example] ---- ==== @@ -1967,9 +1981,9 @@ The `select new` construct packages the query results into a user-written Java c ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CallStatistics.java[tags=hql-select-clause-dynamic-instantiation-example] +include::{example-dir-hql}/CallStatistics.java[tags=hql-select-clause-dynamic-instantiation-example] -include::{sourcedir}/HQLTest.java[tags=hql-select-clause-dynamic-instantiation-example, indent=0] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-clause-dynamic-instantiation-example, indent=0] ---- ==== @@ -1989,7 +2003,7 @@ Alternatively, using the syntax `select new map`, the query may specify that eac ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/HQLTest.java[tags=hql-select-clause-dynamic-map-instantiation-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-clause-dynamic-map-instantiation-example] ---- ==== @@ -2003,7 +2017,7 @@ Or, using the syntax `select new list`, the query may specify that each result s ==== [source, JAVA, indent=0] ---- 
-include::{sourcedir}/HQLTest.java[tags=hql-select-clause-dynamic-list-instantiation-example] +include::{example-dir-hql}/HQLTest.java[tags=hql-select-clause-dynamic-list-instantiation-example] ---- ==== @@ -2025,7 +2039,7 @@ It's only effect is to add `distinct` to the generated SQL. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SelectDistinctTest.java[tags=hql-distinct-projection-query-example] +include::{example-dir-hql}/SelectDistinctTest.java[tags=hql-distinct-projection-query-example] ---- ==== @@ -2072,7 +2086,7 @@ There are also < - org.hibernate + org.hibernate.orm hibernate-spatial ${hibernate.version} @@ -60,10 +62,9 @@ For Maven, you need to add the following dependency: ==== -Not all databases support all the functions defined by Hibernate Spatial. -The table below provides an overview of the functions provided by each database. If the function is defined in the -https://portal.opengeospatial.org/files/?artifact_id=829[Simple Feature Specification], the description references the -relevant section. +Hibernate defines common spatial functions uniformly over all databases. These +functions largely correspond to those specified in the https://portal.opengeospatial.org/files/?artifact_id=829[Simple Feature Specification]. Not all databases are capable of supporting every function, however. The table below details which functions are supported by various database systems. + :yes: icon:check[role="green"] :no: icon:times[role="red"] @@ -108,13 +109,31 @@ relevant section. |================================ ^(1)^ Argument Geometries need to have the same dimensionality. -[NOTE] -==== -In previous versions Hibernate Spatial registered the SFS spatial functions under names without the "st_" prefix. Starting -from Hibernate 6.0, the functions are registered both with and without the prefix. So, e.g., both `st_dimension(geom)` and -`dimension(geom)` will work. 
-==== +Note that beyond the common spatial functions mentioned above, Hibernate may define additional spatial functions for each database dialect. These will be documented in the +Database notes below. + === Database notes +[[spatial-configuration-dialect-postgresql]] +Postgresql:: + +The Postgresql dialect has support for the https://postgis.net/[Postgis spatial extension], but not the Geometric types mentioned in the +https://www.postgresql.org/docs/current/datatype-geometric.html[Postgresql documentation]. + +In addition to the common spatial functions, the following functions are supported: + + +.Additional Postgis function support + +|=== +| Function | Purpose | Syntax | Postgis function operator +|`distance_2d` | 2D distance between two geometries|`distance_2d(geom,geom)`| https://postgis.net/docs/manual-3.3/geometry_distance_knn.html[\<\->] +|`distance_2d_bbox` | 2D distance between the bounding boxes of two geometries|`distance_2d_bbox(geom,geom)`| https://postgis.net/docs/manual-3.3/geometry_distance_box.html[<#>] +|`distance_cpa` | 3D distance between 2 trajectories|`distance_cpa(geom,geom)`| https://postgis.net/docs/manual-3.3/geometry_distance_cpa.html[\|=\|] +|`distance_centroid_nd` | the n-D distance between the centroids of the bounding boxes of two geometries|`distance_centroid_nd(geom,geom)`| https://postgis.net/docs/manual-3.3/geometry_distance_centroid_nd.html[<\<\->>] + +|=== + + [[spatial-configuration-dialect-mysql]] MySQL:: @@ -200,10 +219,6 @@ create transform for db2gse.st_geometry db2_program ( [[spatial-types]] === Types -Hibernate Spatial comes with the following types: - -TODO - It suffices to declare a property as either a JTS or a Geolatte-geom `Geometry` and Hibernate Spatial will map it using the relevant type. 
@@ -213,7 +228,7 @@ Here is an example using JTS: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SpatialTest.java[tags=spatial-types-mapping-example, indent=0] +include::{example-dir-spatial}/SpatialTest.java[tags=spatial-types-mapping-example, indent=0] ---- ==== @@ -224,7 +239,7 @@ We can now treat spatial geometries like any other type. ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SpatialTest.java[tags=spatial-types-point-creation-example] +include::{example-dir-spatial}/SpatialTest.java[tags=spatial-types-point-creation-example] ---- ==== @@ -236,7 +251,7 @@ could use the `within` function to find all objects within a given spatial exten ==== [source, SQL, indent=0] ---- -include::{sourcedir}/SpatialTest.java[tags=spatial-types-query-example] +include::{example-dir-spatial}/SpatialTest.java[tags=spatial-types-query-example] ---- ==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/schema/Schema.adoc b/documentation/src/main/asciidoc/userguide/chapters/schema/Schema.adoc index 9794652a2c1c..184972fe031b 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/schema/Schema.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/schema/Schema.adoc @@ -1,8 +1,10 @@ [[schema-generation]] -== Schema generation -:sourcedir: ../../../../../test/java/org/hibernate/userguide/schema +== Schema Generation +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-schemagen: {core-project-dir}/src/test/java/org/hibernate/orm/test/schema +:example-dir-schemagen-resources: {core-project-dir}/src/test/resources :extrasdir: extras -:resourcesdir: ../../../../../test/resources Hibernate allows you to generate the database from the entity mappings. 
@@ -22,7 +24,7 @@ Considering the following Domain Model: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/BaseSchemaGeneratorTest.java[tags=schema-generation-domain-model-example] +include::{example-dir-schemagen}/BaseSchemaGeneratorTest.java[tags=schema-generation-domain-model-example] ---- ==== @@ -49,7 +51,7 @@ For instance, considering the following `schema-generation.sql` import file: ==== [source, JAVA, indent=0] ---- -include::{resourcesdir}/schema-generation.sql[] +include::{example-dir-schemagen-resources}/schema-generation.sql[] ---- ==== @@ -80,7 +82,7 @@ Considering the following HBM mapping: ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/SchemaGenerationTest.hbm.xml[] +include::{example-dir-schemagen}/SchemaGenerationTest.hbm.xml[] ---- ==== @@ -96,7 +98,7 @@ Hibernate offers the `@Check` annotation so that you can specify an arbitrary SQ ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CheckTest.java[tag=schema-generation-database-checks-example] +include::{example-dir-schemagen}/CheckTest.java[tag=schema-generation-database-checks-example] ---- ==== @@ -108,7 +110,7 @@ a `ConstraintViolationException` is going to be thrown. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/CheckTest.java[tag=schema-generation-database-checks-persist-example] +include::{example-dir-schemagen}/CheckTest.java[tag=schema-generation-database-checks-persist-example] ---- [source, SQL, indent=0] @@ -127,7 +129,7 @@ With Hibernate, you can specify a default value for a given database column usin ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ColumnDefaultTest.java[tag=schema-generation-column-default-value-mapping-example] +include::{example-dir-schemagen}/ColumnDefaultTest.java[tag=schema-generation-column-default-value-mapping-example] ---- [source, SQL, indent=0] @@ -150,7 +152,7 @@ This way, when the `name` and or `clientId` attribute is null, the database will ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/ColumnDefaultTest.java[tag=schema-generation-column-default-value-persist-example] +include::{example-dir-schemagen}/ColumnDefaultTest.java[tag=schema-generation-column-default-value-persist-example] ---- [source, SQL, indent=0] @@ -176,7 +178,7 @@ Considering the following entity mapping, Hibernate generates the unique constra ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/UniqueConstraintTest.java[tag=schema-generation-columns-unique-constraint-mapping-example] +include::{example-dir-schemagen}/UniqueConstraintTest.java[tag=schema-generation-columns-unique-constraint-mapping-example] ---- [source, SQL, indent=0] @@ -193,7 +195,7 @@ it's no longer possible to add two books with the same title and for the same au ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/UniqueConstraintTest.java[tag=schema-generation-columns-unique-constraint-persist-example] +include::{example-dir-schemagen}/UniqueConstraintTest.java[tag=schema-generation-columns-unique-constraint-persist-example] ---- [source, SQL, indent=0] @@ -216,7 +218,7 @@ Considering the following entity mapping. 
Hibernate generates the index when cre ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/IndexTest.java[tag=schema-generation-columns-index-mapping-example] +include::{example-dir-schemagen}/IndexTest.java[tag=schema-generation-columns-index-mapping-example] ---- [source, SQL, indent=0] diff --git a/documentation/src/main/asciidoc/userguide/chapters/statistics/Statistics.adoc b/documentation/src/main/asciidoc/userguide/chapters/statistics/Statistics.adoc index ce5a4e9630b8..a520e950dec7 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/statistics/Statistics.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/statistics/Statistics.adoc @@ -1,6 +1,5 @@ [[statistics]] == Statistics -:stat-sourcedir: ../../../../../../../hibernate-core/src/main/java/org/hibernate/stat Hibernate can gather all sorts of statistics which can help you get a better insight into what Hibernate does behind the scenes. diff --git a/documentation/src/main/asciidoc/userguide/chapters/tooling/Tooling.adoc b/documentation/src/main/asciidoc/userguide/chapters/tooling/Tooling.adoc index 17328aed44ff..cea3eaa068f2 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/tooling/Tooling.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/tooling/Tooling.adoc @@ -1,8 +1,8 @@ [[tooling]] == Build Tool Integration -:rootProjectDir: ../../../../../../.. -:documentationProjectDir: {rootProjectDir}/documentation -:documentationModel: {documentationProjectDir}/src/main/java/org/hibernate/userguide/model +:root-project-dir: ../../../../../../.. 
+:testing-project-dir: {root-project-dir}/hibernate-testing +:example-dir-model: {testing-project-dir}/src/main/java/org/hibernate/testing/orm/domain/userguide Hibernate provides build-time services available as plugins for diff --git a/documentation/src/main/asciidoc/userguide/chapters/tooling/extras/maven-example-metamodel.pom b/documentation/src/main/asciidoc/userguide/chapters/tooling/extras/maven-example-metamodel.pom new file mode 100644 index 000000000000..56f4a3f1c9b6 --- /dev/null +++ b/documentation/src/main/asciidoc/userguide/chapters/tooling/extras/maven-example-metamodel.pom @@ -0,0 +1,27 @@ + + + [...] + + org.apache.maven.plugins + maven-compiler-plugin + ... + + + + org.hibernate.orm + hibernate-jpamodelgen + $currentHibernateVersion + + + + org.sample + sample-dependency + + + + + + + [...] + + \ No newline at end of file diff --git a/documentation/src/main/asciidoc/userguide/chapters/tooling/gradle.adoc b/documentation/src/main/asciidoc/userguide/chapters/tooling/gradle.adoc index 67bb95a5d009..39b882883a13 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/tooling/gradle.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/tooling/gradle.adoc @@ -1,10 +1,14 @@ [[tooling-gradle]] -=== Gradle Plugin +=== Gradle -For integrating with https://gradle.org[Gradle], Hibernate provides the -https://plugins.gradle.org/plugin/org.hibernate.orm[org.hibernate.orm] plugin which -supports bytecode enhancement and static metamodel generation but not schema tooling. +Hibernate provides the ability to integrate both +<> and <> capabilities into Gradle builds. +[[tooling-gradle-enhancement]] +==== Bytecode Enhancement + +Bytecode enhancement is incorporated into Gradle builds using Hibernate's +https://plugins.gradle.org/plugin/org.hibernate.orm[Gradle plugin]. 
To apply the plugin, use Gradle's `plugins {}` block: [source,gradle] @@ -14,24 +18,18 @@ plugins { } ---- - Applying the plugin creates a `hibernate` extension (`HibernateOrmSpec`) to configure the plugin. -By default, when the plugin is applied, support for both bytecode enhancement and static metamodel -generation is enabled. [source,gradle] ---- hibernate { - // for illustration, let's disable both - disableEnhancement - disableJpaMetamodel + ... } ---- -[[tooling-gradle-enhancement]] -==== Bytecode Enhancement +Enhancement is configured through the `enhancement` extension. -Enhancement is configured through the `enhancement` extension: +NOTE: `hibernate {}` and `enhancement {}` are separate to allow for schema tooling capabilities to be added later. [source,gradle] ---- @@ -47,13 +45,9 @@ hibernate { The extension is of type `EnhancementSpec` which exposes the following properties: - -enableLazyInitialization:: Whether to incorporate lazy loading support into the enhanced bytecode. Defaults to `true`. This setting is deprecated for removal without a replacement. -enableDirtyTracking:: Whether to incorporate dirty tracking into the enhanced bytecode. Defaults to `true`. This setting is deprecated for removal without a replacement. -enableAssociationManagement:: Whether to add bidirectional association management into the enhanced bytecode - - -Which all default to false (disabled). +enableLazyInitialization:: Whether to incorporate lazy loading support into the enhanced bytecode. Defaults to `true`. This setting is deprecated for removal without a replacement. See <> +enableDirtyTracking:: Whether to incorporate dirty tracking into the enhanced bytecode. Defaults to `true`. This setting is deprecated for removal without a replacement. See <>. +enableAssociationManagement:: Whether to add bidirectional association management into the enhanced bytecode. See <>. 
It also exposes the following method forms: @@ -65,8 +59,10 @@ It also exposes the following method forms: [[tooling-gradle-modelgen]] ==== Static Metamodel Generation -One approach to integrate Static Metamodel generation into a Gradle build is to -use Gradle's support for annotation processors - +Static metamodel generation can be incorporated into Gradle builds via the +annotation processor provided by the `org.hibernate.orm:hibernate-jpamodelgen` artifact. Applying +an annotation processor in Gradle is super easy - + [source,gradle] ---- @@ -74,40 +70,3 @@ dependencies { annotationProcessor "org.hibernate.orm:hibernate-jpamodelgen:${hibernateVersion}" } ---- - -When the build does not need bytecode enhancement support, this is a perfectly valid solution. - -The plugin supports simpler configuration of the generator using the registered -`jpaMetamodel` extension: - - -[source,gradle] ----- -hibernate { - jpaMetamodel { - applyGeneratedAnnotation false - suppress 'raw' - generationOutputDirectory "${buildDir}/generated/sources/modelgen" - compileOutputDirectory "${buildDir}/classes/java/modelgen" - } -} ----- - -The extension is of type `JpaMetamodelGenerationSpec`, which exposes the following configuration properties: - -applyGeneratedAnnotation:: Should the `javax.annotation.processing.Generated` annotation be added to the -generated classes. Defaults to `true`. -suppressions:: Suppressions to add to the generated classes. Defaults to `['raw', 'deprecation']` -generationOutputDirectory:: Directory where the generated metamodel classes should be created. Defaults -to `${buildDir}/generated/sources/jpaMetamodel` -[[tooling-gradle-modelgen-compile-output]] -compileOutputDirectory:: Directory where the classes compiled from the generated metamodel classes should be -created. Defaults to `${buildDir}/classes/java/jpaMetamodel`. 
- -It also exposes the following method forms: - -* applyGeneratedAnnotation(boolean) -* suppress(String) -* generationOutputDirectory(Object) -* compileOutputDirectory(Object) - diff --git a/documentation/src/main/asciidoc/userguide/chapters/tooling/maven.adoc b/documentation/src/main/asciidoc/userguide/chapters/tooling/maven.adoc index a5606327535a..3dc4d31f5e78 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/tooling/maven.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/tooling/maven.adoc @@ -1,16 +1,35 @@ [[tooling-maven]] -=== Maven Plugin +=== Maven + +The following sections illustrate how both <> and <> capabilities can be integrated into Maven builds. + +[[tooling-maven-enhancement]] +==== Bytecode Enhancement Hibernate provides a https://maven.apache.org/[Maven] plugin capable of providing build-time enhancement of the domain model as they are compiled as part of a Maven -build. See the section on the <> for details +build. See the section on <> for details on the configuration settings. By default, all enhancements are disabled. -.Apply the Maven plugin +.Apply the Bytecode Enhancement plugin ==== [source,xml] ---- include::extras/maven-example.pom[] ---- -==== \ No newline at end of file +==== + +[[tooling-maven-modelgen]] +==== Static Metamodel Generation + +Static metamodel generation should be integrated into a maven project through the annotation processor +paths of the maven compiler plugin. 
+ +.Integrate the metamodel generator +==== +[source,xml] +---- +include::extras/maven-example-metamodel.pom[] +---- +==== diff --git a/documentation/src/main/asciidoc/userguide/chapters/tooling/modelgen.adoc b/documentation/src/main/asciidoc/userguide/chapters/tooling/modelgen.adoc index bb329ac3a96d..1128003ff3aa 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/tooling/modelgen.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/tooling/modelgen.adoc @@ -1,35 +1,35 @@ [[tooling-modelgen]] === Static Metamodel Generator -:rootProjectDir: ../../../../../../.. -:documentationProjectDir: {rootProjectDir}/documentation -:documentationModel: {documentationProjectDir}/src/main/java/org/hibernate/userguide/model -:documentationMetamodel: {documentationProjectDir}/target/generated/sources/annotationProcessor/java/main/org/hibernate/userguide/model -:toolingTestsDir: {documentationProjectDir}/src/test/java/org/hibernate/userguide/tooling - -Jakarta Persistence defines a typesafe Criteria API which allows `Criteria` queries to be constructed in a -strongly-typed manner, utilizing so-called static metamodel classes. For developers, it is important that -the task of the metamodel generation can be automated. Hibernate Static Metamodel Generator is an annotation -processor based on https://jcp.org/en/jsr/detail?id=269[JSR_269] with the task of creating Jakarta Persistence -static metamodel classes. - -See <> for discussion of Jakarta Persistence criteria queries. - -The Hibernate Static Metamodel Generator is defined by the published `org.hibernate.orm:metamodel-generator` -artifact. As it is defined as an -https://docs.oracle.com/en/java/javase/11/tools/javac.html#GUID-082C33A5-CBCA-471A-845E-E77F79B7B049[annotation processor], -it is usable anytime `javac` is used. See the tool-specific discussions (<>, <> -and <>) for details on integrating the generator into those environments. 
- -NOTE:: The fully qualified name of the processor class is `org.hibernate.jpamodelgen.JPAMetaModelEntityProcessor`. - -[TIP] +:root-project-dir: ../../../../../../.. +:testing-project-dir: {root-project-dir}/hibernate-testing +:example-dir-model: {testing-project-dir}/src/main/java/org/hibernate/testing/orm/domain/userguide +:example-dir-metamodelgen-generated: {testing-project-dir}/target/generated/sources/annotationProcessor/java/main/org/hibernate/testing/orm/domain/userguide +:core-project-dir: {root-project-dir}/hibernate-core +:toolingTestsDir: {core-project-dir}/src/test/java/org/hibernate/orm/test/tooling +:ann-proc: https://docs.oracle.com/en/java/javase/11/tools/javac.html#GUID-082C33A5-CBCA-471A-845E-E77F79B7B049__GUID-3FA757C8-B67B-46BC-AEF9-7C3FFB126A93 +:ann-proc-path: https://docs.oracle.com/en/java/javase/11/tools/javac.html#GUID-AEEC9F07-CB49-4E96-8BC7-BCC2C7F725C9__GUID-214E175F-0F06-4CDC-B511-5BA469955F5A +:ann-proc-options: https://docs.oracle.com/en/java/javase/11/tools/javac.html#GUID-AEEC9F07-CB49-4E96-8BC7-BCC2C7F725C9__GUID-6CC814A4-8A29-434A-B7E1-DF8234784E7C +:intg-guide: https://docs.jboss.org/hibernate/orm/6.3/introduction/html_single/Hibernate_Introduction.html#generator + +Jakarta Persistence defines a typesafe Criteria API which allows <> +queries to be constructed in a strongly-typed manner, utilizing so-called static metamodel +classes. The Hibernate Static Metamodel Generator, available via the published +`org.hibernate.orm:hibernate-jpamodelgen` artifact, is an link:{ann-proc}[annotation processor] +used to generate these static metamodel classes. + +[NOTE] ==== -The `javac` option _-proc:only_ instructs the compiler to just run the annotation processing. -You can also disable annotation processing by specifying _-proc:none_. +The Hibernate Static Metamodel Generator has many additional capabilities beyond static metamodel +class generation. 
See the link:{intg-guide}[Introduction Guide] for a complete discussion of its +capabilities. The rest of the discussion here is limited to the Jakarta Persistence static metamodel. -Run `'javac -help'` to see which other annotation processor relevant options can be specified. +The generator is expected to be run using the `javac` link:{ann-proc-path}[-processorpath] option. +See the tool-specific discussions (<>, <> +and <>) for details on integrating the generator into those environments. ==== +[[tooling-modelgen-classes]] +==== Metamodel classes The structure of the metamodel classes is described in the Jakarta Persistence specification, but for completeness the definition is repeated in the following @@ -78,9 +78,9 @@ As an example, consider the following domain model - ==== [source, JAVA, indent=0] ---- -include::{documentationModel}/tooling/Customer.java[tags=tooling-modelgen-model] -include::{documentationModel}/tooling/Order.java[tags=tooling-modelgen-model] -include::{documentationModel}/tooling/Item.java[tags=tooling-modelgen-model] +include::{example-dir-model}/tooling/Customer.java[tags=tooling-modelgen-model] +include::{example-dir-model}/tooling/Order.java[tags=tooling-modelgen-model] +include::{example-dir-model}/tooling/Item.java[tags=tooling-modelgen-model] ---- ==== @@ -91,7 +91,7 @@ Given this model, the generator will produce classes named `Customer_`, `Order_` ==== [source, JAVA, indent=0] ---- -include::{documentationMetamodel}/tooling/Order_.java[] +include::{example-dir-metamodelgen-generated}/tooling/Order_.java[] ---- ==== @@ -107,3 +107,18 @@ include::{toolingTestsDir}/modelgen/ModelGenTests.java[tags=tooling-modelgen-usa ---- ==== +[[tooling-modelgen-options]] +==== Generation Options + +The Hibernate Static Metamodel Generator accepts a number of configuration options, which are specified as +part of the `javac` execution using standard link:{ann-proc-options}[-A] options - + +`-Adebug=[true|false]`:: Enables debug logging from the 
generator. +`-AfullyAnnotationConfigured=[true|false]`:: Controls whether `orm.xml` mapping should be considered. +`-ApersistenceXml=[path]`:: Specifies the path to the `persistence.xml` file. +`-AormXml=[path]`:: Specifies the path to an `orm.xml` file. +`-AlazyXmlParsing=[true|false]`:: Controls whether the processor should attempt to determine whether any `orm.xml` files have changed. +`-AaddGeneratedAnnotation=[true|false]`:: Controls whether the processor should add `@jakarta.annotation.Generated` to the generated classes. +`-AaddGenerationDate=[true|false]`:: Controls whether the processor should add `@jakarta.annotation.Generated#date`. +`-AaddSuppressWarningsAnnotation=[true|false]`:: Controls whether the processor should add `@SuppressWarnings({"deprecation","rawtypes"})` to the generated classes. + diff --git a/documentation/src/main/asciidoc/userguide/chapters/transactions/Transactions.adoc b/documentation/src/main/asciidoc/userguide/chapters/transactions/Transactions.adoc index 4680789c517c..028d770c752e 100644 --- a/documentation/src/main/asciidoc/userguide/chapters/transactions/Transactions.adoc +++ b/documentation/src/main/asciidoc/userguide/chapters/transactions/Transactions.adoc @@ -1,6 +1,8 @@ [[transactions]] -== Transactions and concurrency control -:sourcedir: ../../../../../test/java/org/hibernate/userguide/transactions +== Transactions +:root-project-dir: ../../../../../../.. +:core-project-dir: {root-project-dir}/hibernate-core +:example-dir-transaction: {core-project-dir}/src/test/java/org/hibernate/orm/test/transactions It is important to understand that the term transaction has many different yet related meanings in regards to persistence and Object/Relational Mapping. In most use-cases these definitions align, but that is not always the case. @@ -116,7 +118,7 @@ Let's take a look at using the Transaction API in the various environments. 
==== [source, JAVA, indent=0] ---- -include::{sourcedir}/TransactionsTest.java[tags=transactions-api-jdbc-example] +include::{example-dir-transaction}/TransactionsTest.java[tags=transactions-api-jdbc-example] ---- ==== @@ -125,7 +127,7 @@ include::{sourcedir}/TransactionsTest.java[tags=transactions-api-jdbc-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/TransactionsTest.java[tags=transactions-api-cmt-example] +include::{example-dir-transaction}/TransactionsTest.java[tags=transactions-api-cmt-example] ---- ==== @@ -134,7 +136,7 @@ include::{sourcedir}/TransactionsTest.java[tags=transactions-api-cmt-example] ==== [source, JAVA, indent=0] ---- -include::{sourcedir}/TransactionsTest.java[tags=transactions-api-bmt-example] +include::{example-dir-transaction}/TransactionsTest.java[tags=transactions-api-bmt-example] ---- ==== diff --git a/documentation/src/main/java/org/hibernate/userguide/model/Call.java b/documentation/src/main/java/org/hibernate/userguide/model/Call.java deleted file mode 100644 index ac7d244786bf..000000000000 --- a/documentation/src/main/java/org/hibernate/userguide/model/Call.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. - * See the lgpl.txt file in the root directory or . 
- */ -package org.hibernate.userguide.model; - -import java.time.LocalDateTime; -import jakarta.persistence.Column; -import jakarta.persistence.Entity; -import jakarta.persistence.GeneratedValue; -import jakarta.persistence.Id; -import jakarta.persistence.ManyToOne; -import jakarta.persistence.Table; - -/** - * @author Vlad Mihalcea - */ -//tag::hql-examples-domain-model-example[] -@Entity -@Table(name = "phone_call") -public class Call { - - @Id - @GeneratedValue - private Long id; - - @ManyToOne - private Phone phone; - - @Column(name = "call_timestamp") - private LocalDateTime timestamp; - - private int duration; - - @ManyToOne - private Payment payment; - - //Getters and setters are omitted for brevity - -//end::hql-examples-domain-model-example[] - public Call() {} - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Phone getPhone() { - return phone; - } - - public void setPhone(Phone phone) { - this.phone = phone; - } - - public LocalDateTime getTimestamp() { - return timestamp; - } - - public void setTimestamp(LocalDateTime timestamp) { - this.timestamp = timestamp; - } - - public int getDuration() { - return duration; - } - - public void setDuration(int duration) { - this.duration = duration; - } - - public Payment getPayment() { - return payment; - } - - public void setPayment(Payment payment) { - this.payment = payment; - } - //tag::hql-examples-domain-model-example[] -} -//end::hql-examples-domain-model-example[] diff --git a/documentation/src/main/java/org/hibernate/userguide/model/Payment.java b/documentation/src/main/java/org/hibernate/userguide/model/Payment.java deleted file mode 100644 index a74e832cbe43..000000000000 --- a/documentation/src/main/java/org/hibernate/userguide/model/Payment.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. 
- * See the lgpl.txt file in the root directory or . - */ -package org.hibernate.userguide.model; - -import java.math.BigDecimal; -import jakarta.persistence.Entity; -import jakarta.persistence.GeneratedValue; -import jakarta.persistence.Id; -import jakarta.persistence.Inheritance; -import jakarta.persistence.InheritanceType; -import jakarta.persistence.ManyToOne; - -/** - * @author Vlad Mihalcea - */ -//tag::hql-examples-domain-model-example[] -@Entity -@Inheritance(strategy = InheritanceType.JOINED) -public class Payment { - - @Id - @GeneratedValue - private Long id; - - private BigDecimal amount; - - private boolean completed; - - @ManyToOne - private Account account; - - @ManyToOne - private Person person; - - //Getters and setters are omitted for brevity - -//end::hql-examples-domain-model-example[] - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public BigDecimal getAmount() { - return amount; - } - - public void setAmount(BigDecimal amount) { - this.amount = amount; - } - - public boolean isCompleted() { - return completed; - } - - public void setCompleted(boolean completed) { - this.completed = completed; - } - - public Person getPerson() { - return person; - } - - public void setPerson(Person person) { - this.person = person; - } - - public Account getAccount() { - return account; - } - - public void setAccount(Account account) { - this.account = account; - } - //tag::hql-examples-domain-model-example[] -} -//end::hql-examples-domain-model-example[] diff --git a/documentation/src/main/java/org/hibernate/userguide/model/Person.java b/documentation/src/main/java/org/hibernate/userguide/model/Person.java deleted file mode 100644 index e6dff50b62b8..000000000000 --- a/documentation/src/main/java/org/hibernate/userguide/model/Person.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. 
- * See the lgpl.txt file in the root directory or . - */ -package org.hibernate.userguide.model; - -import java.time.LocalDateTime; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import jakarta.persistence.CascadeType; -import jakarta.persistence.ColumnResult; -import jakarta.persistence.ConstructorResult; -import jakarta.persistence.ElementCollection; -import jakarta.persistence.Entity; -import jakarta.persistence.EntityResult; -import jakarta.persistence.EnumType; -import jakarta.persistence.FieldResult; -import jakarta.persistence.GeneratedValue; -import jakarta.persistence.Id; -import jakarta.persistence.MapKeyEnumerated; -import jakarta.persistence.NamedNativeQuery; -import jakarta.persistence.NamedQuery; -import jakarta.persistence.NamedStoredProcedureQuery; -import jakarta.persistence.OneToMany; -import jakarta.persistence.OrderColumn; -import jakarta.persistence.ParameterMode; -import jakarta.persistence.QueryHint; -import jakarta.persistence.SqlResultSetMapping; -import jakarta.persistence.StoredProcedureParameter; -import jakarta.persistence.Version; - -/** - * @author Vlad Mihalcea - */ -//tag::sql-scalar-NamedNativeQuery-example[] -@NamedNativeQuery( - name = "find_person_name", - query = - "SELECT name " + - "FROM Person ", - resultClass = String.class -) -//end::sql-scalar-NamedNativeQuery-example[] -//tag::sql-multiple-scalar-values-NamedNativeQuery-example[] -@NamedNativeQuery( - name = "find_person_name_and_nickName", - query = - "SELECT " + - " name, " + - " nickName " + - "FROM Person " -) -//end::sql-multiple-scalar-values-NamedNativeQuery-example[] -// tag::sql-multiple-scalar-values-dto-NamedNativeQuery-example[] -@NamedNativeQuery( - name = "find_person_name_and_nickName_dto", - query = - "select " + - " name, " + - " nickName " + - "from Person ", - resultSetMapping = "name_and_nickName_dto" -) -//end::sql-multiple-scalar-values-dto-NamedNativeQuery-example[] 
-//tag::sql-entity-NamedNativeQuery-example[] -@NamedNativeQuery( - name = "find_person_by_name", - query = - "select " + - " p.id AS \"id\", " + - " p.name AS \"name\", " + - " p.nickName AS \"nickName\", " + - " p.address AS \"address\", " + - " p.createdOn AS \"createdOn\", " + - " p.version AS \"version\" " + - "from Person p " + - "where p.name LIKE :name", - resultClass = Person.class -) -//end::sql-entity-NamedNativeQuery-example[] -//tag::sql-entity-associations-NamedNativeQuery-example[] -@NamedNativeQuery( - name = "find_person_with_phones_by_name", - query = - "select " + - " pr.id AS \"pr.id\", " + - " pr.name AS \"pr.name\", " + - " pr.nickName AS \"pr.nickName\", " + - " pr.address AS \"pr.address\", " + - " pr.createdOn AS \"pr.createdOn\", " + - " pr.version AS \"pr.version\", " + - " ph.id AS \"ph.id\", " + - " ph.person_id AS \"ph.person_id\", " + - " ph.phone_number AS \"ph.number\", " + - " ph.phone_type AS \"ph.type\" " + - "from Person pr " + - "join Phone ph ON pr.id = ph.person_id " + - "where pr.name LIKE :name", - resultSetMapping = "person_with_phones" -) -//end::sql-entity-associations-NamedNativeQuery-example[] -//tag::sql-entity-associations-NamedNativeQuery-example[] -@SqlResultSetMapping( - name = "person_with_phones", - entities = { - @EntityResult( - entityClass = Person.class, - fields = { - @FieldResult( name = "id", column = "pr.id" ), - @FieldResult( name = "name", column = "pr.name" ), - @FieldResult( name = "nickName", column = "pr.nickName" ), - @FieldResult( name = "address", column = "pr.address" ), - @FieldResult( name = "createdOn", column = "pr.createdOn" ), - @FieldResult( name = "version", column = "pr.version" ), - } - ), - @EntityResult( - entityClass = Phone.class, - fields = { - @FieldResult( name = "id", column = "ph.id" ), - @FieldResult( name = "person", column = "ph.person_id" ), - @FieldResult( name = "number", column = "ph.number" ), - @FieldResult( name = "type", column = "ph.type" ), - } - ) - } - ) 
-//end::sql-entity-associations-NamedNativeQuery-example[] -//tag::sql-multiple-scalar-values-dto-NamedNativeQuery-example[] -@SqlResultSetMapping( - name = "name_and_nickName_dto", - classes = @ConstructorResult( - targetClass = PersonNames.class, - columns = { - @ColumnResult(name = "name"), - @ColumnResult(name = "nickName") - } - ) -) -//end::sql-multiple-scalar-values-dto-NamedNativeQuery-example[] -//tag::hql-examples-domain-model-example[] -//tag::jpql-api-named-query-example[] -@NamedQuery( - name = "get_person_by_name", - query = "select p from Person p where name = :name" -) -//end::jpql-api-named-query-example[] -// tag::jpa-read-only-entities-native-example[] -@NamedQuery( - name = "get_read_only_person_by_name", - query = "select p from Person p where name = :name", - hints = { - @QueryHint( - name = "org.hibernate.readOnly", - value = "true" - ) - } -) -//end::jpa-read-only-entities-native-example[] -@NamedQuery( - name = "delete_person", - query = "delete Person" -) -//tag::sql-sp-ref-cursor-oracle-named-query-example[] -@NamedStoredProcedureQuery( - name = "sp_person_phones", - procedureName = "sp_person_phones", - parameters = { - @StoredProcedureParameter( - name = "personId", - type = Long.class, - mode = ParameterMode.IN - ), - @StoredProcedureParameter( - name = "personPhones", - type = Class.class, - mode = ParameterMode.REF_CURSOR - ) - } -) -//end::sql-sp-ref-cursor-oracle-named-query-example[] -@Entity -public class Person { - - @Id - @GeneratedValue - private Long id; - - private String name; - - private String nickName; - - private String address; - - private LocalDateTime createdOn; - - @OneToMany(mappedBy = "person", cascade = CascadeType.ALL) - @OrderColumn(name = "order_id") - private List phones = new ArrayList<>(); - - @ElementCollection - @MapKeyEnumerated(EnumType.STRING) - private Map addresses = new HashMap<>(); - - @Version - private int version; - - //Getters and setters are omitted for brevity - 
-//end::hql-examples-domain-model-example[] - - public Person() {} - - public Person(String name) { - this.name = name; - } - - public Long getId() { - return id; - } - - public String getName() { - return name; - } - - public String getNickName() { - return nickName; - } - - public void setNickName(String nickName) { - this.nickName = nickName; - } - - public String getAddress() { - return address; - } - - public void setAddress(String address) { - this.address = address; - } - - public LocalDateTime getCreatedOn() { - return createdOn; - } - - public void setCreatedOn(LocalDateTime createdOn) { - this.createdOn = createdOn; - } - - public List getPhones() { - return phones; - } - - public Map getAddresses() { - return addresses; - } - - public void addPhone(Phone phone) { - phones.add( phone ); - phone.setPerson( this ); - } -//tag::hql-examples-domain-model-example[] -} -//end::hql-examples-domain-model-example[] diff --git a/documentation/src/main/java/org/hibernate/userguide/model/PersonPhoneCount.java b/documentation/src/main/java/org/hibernate/userguide/model/PersonPhoneCount.java deleted file mode 100644 index e8cbe902c6c8..000000000000 --- a/documentation/src/main/java/org/hibernate/userguide/model/PersonPhoneCount.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. - * See the lgpl.txt file in the root directory or . 
- */ -package org.hibernate.userguide.model; - -/** - * @author Vlad Mihalcea - */ -public class PersonPhoneCount { - - private final String name; - - private final Number phoneCount; - - public PersonPhoneCount(String name, Number phoneCount) { - this.name = name; - this.phoneCount = phoneCount; - } - - public String getName() { - return name; - } - - public Number getPhoneCount() { - return phoneCount; - } -} diff --git a/documentation/src/main/java/org/hibernate/userguide/model/Phone.java b/documentation/src/main/java/org/hibernate/userguide/model/Phone.java deleted file mode 100644 index 671ca8887ae8..000000000000 --- a/documentation/src/main/java/org/hibernate/userguide/model/Phone.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. - * See the lgpl.txt file in the root directory or . - */ -package org.hibernate.userguide.model; - -import java.time.LocalDateTime; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import jakarta.persistence.CascadeType; -import jakarta.persistence.Column; -import jakarta.persistence.ColumnResult; -import jakarta.persistence.ConstructorResult; -import jakarta.persistence.ElementCollection; -import jakarta.persistence.Entity; -import jakarta.persistence.EnumType; -import jakarta.persistence.Enumerated; -import jakarta.persistence.FetchType; -import jakarta.persistence.Id; -import jakarta.persistence.ManyToOne; -import jakarta.persistence.MapKey; -import jakarta.persistence.OneToMany; -import jakarta.persistence.SqlResultSetMapping; - -import org.hibernate.annotations.NamedNativeQuery; -import org.hibernate.annotations.NamedQuery; - -/** - * @author Vlad Mihalcea - */ -//tag::jpql-api-hibernate-named-query-example[] -@NamedQuery( - name = "get_phone_by_number", - query = "select p " + - "from Phone p " + - "where p.number = :number", - timeout = 1, - 
readOnly = true -) -//end::jpql-api-hibernate-named-query-example[] -//tag::sql-multiple-scalar-values-dto-NamedNativeQuery-hibernate-example[] -@NamedNativeQuery( - name = "get_person_phone_count", - query = "select pr.name AS name, count(*) AS phoneCount " + - "from Phone p " + - "join Person pr ON pr.id = p.person_id " + - "group BY pr.name", - resultSetMapping = "person_phone_count", - timeout = 1, - readOnly = true -) -@SqlResultSetMapping( - name = "person_phone_count", - classes = @ConstructorResult( - targetClass = PersonPhoneCount.class, - columns = { - @ColumnResult(name = "name"), - @ColumnResult(name = "phoneCount") - } - ) -) -//end::sql-multiple-scalar-values-dto-NamedNativeQuery-hibernate-example[] -//tag::hql-examples-domain-model-example[] -@Entity -public class Phone { - - @Id - private Long id; - - @ManyToOne(fetch = FetchType.LAZY) - private Person person; - - @Column(name = "phone_number") - private String number; - - @Enumerated(EnumType.STRING) - @Column(name = "phone_type") - private PhoneType type; - - @OneToMany(mappedBy = "phone", cascade = CascadeType.ALL, orphanRemoval = true) - private List calls = new ArrayList<>( ); - - //tag::hql-collection-qualification-example[] - @OneToMany(mappedBy = "phone") - @MapKey(name = "timestamp") - private Map callHistory = new HashMap<>(); - //end::hql-collection-qualification-example[] - - @ElementCollection - private List repairTimestamps = new ArrayList<>( ); - - //Getters and setters are omitted for brevity - -//end::hql-examples-domain-model-example[] - public Phone() {} - - public Phone(String number) { - this.number = number; - } - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getNumber() { - return number; - } - - public Person getPerson() { - return person; - } - - public void setPerson(Person person) { - this.person = person; - } - - public PhoneType getType() { - return type; - } - - public void setType(PhoneType type) { - 
this.type = type; - } - - public List getCalls() { - return calls; - } - - public Map getCallHistory() { - return callHistory; - } - - public List getRepairTimestamps() { - return repairTimestamps; - } - - public void addCall(Call call) { - calls.add( call ); - callHistory.put( call.getTimestamp(), call ); - call.setPhone( this ); - } -//tag::hql-examples-domain-model-example[] -} -//end::hql-examples-domain-model-example[] diff --git a/documentation/src/main/style/asciidoctor/css/asciidoctor.css b/documentation/src/main/style/asciidoctor/css/asciidoctor.css index 5f9001c0157f..9258eefbe296 100644 --- a/documentation/src/main/style/asciidoctor/css/asciidoctor.css +++ b/documentation/src/main/style/asciidoctor/css/asciidoctor.css @@ -349,8 +349,8 @@ span.icon>.fa{cursor:default} .admonitionblock td.icon .icon-warning:before{content:"\f071";color:#bf6900} .admonitionblock td.icon .icon-caution:before{content:"\f06d";color:#bf3400} .admonitionblock td.icon .icon-important:before{content:"\f06a";color:#bf0000} -.conum[data-value]{display:inline-block;color:#fff!important;background-color:rgba(0,0,0,.8);-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold} -.conum[data-value] *{color:#fff!important} +.conum[data-value]{display:inline-block;color:black!important;background-color:white;-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold} +.conum[data-value] *{color:black!important} .conum[data-value]+b{display:none} .conum[data-value]:after{content:attr(data-value)} pre .conum[data-value]{position:relative;top:-.125em} diff --git a/documentation/src/main/style/asciidoctor/css/hibernate-layout.css 
b/documentation/src/main/style/asciidoctor/css/hibernate-layout.css index 1c4375f78b55..61b5f073f304 100644 --- a/documentation/src/main/style/asciidoctor/css/hibernate-layout.css +++ b/documentation/src/main/style/asciidoctor/css/hibernate-layout.css @@ -1,6 +1,9 @@ -html -{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%} -body{ +html { + font-family:sans-serif; + -ms-text-size-adjust:100%; + -webkit-text-size-adjust:100% +} +body { position:relative !important; cursor:auto !important; } @@ -11,8 +14,18 @@ body:before { #header { width: 1000px !important; } -#toctitle,.sidebarblock>.content>.title,h4,h5,h6{ - font-family:"Open Sans","DejaVu Sans",sans-serif !important; +.sidebarblock>.content>.title,h4,h5,h6 { + font-family: 'Noto Serif', "Open Sans", "DejaVu Sans", sans-serif !important; + font-size: 1.3em !important; + margin-top:0em !important; + margin-bottom:1em !important; + line-height:1.0125em !important +} +#toctitle { + font-family: 'Noto Serif', "Open Sans", "DejaVu Sans", sans-serif !important; +} +#toctitle>.content>.title,h4,h5,h6 { + font-family: 'Noto Serif', "Open Sans", "DejaVu Sans", sans-serif !important; font-weight:300 !important; font-style:normal !important; font-size: 1.3em !important; @@ -35,33 +48,32 @@ body:before { #toc>#tocsearch { font-family: "FontAwesome"; } +#author, #revnumber { + font-size: 1.1em !important; + margin-bottom: 0.2em !important; +} .subheader .title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{ color:darkslategray; } -h1{ - font-size: 1.9em !important; +h1 { + font-size: 2.5em !important; color:#182737 !important; - line-height: 1.2 !important; - font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; + 
font-family: 'Noto Serif', 'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; font-weight: bold !important; - text-align: justify !important; + text-align: center !important; text-decoration: none !important; width:100% !important; - height: 103px !important; - margin-top:1.1em !important; - margin-bottom:.2em !important; - line-height:1em !important; - padding-top: 2em !important; + margin-top:1.5em !important; + margin-bottom: 1em !important; } -h2{ +h2 { font-weight: bold !important; font-size: 1.7em !important; - line-height: 3em !important; - font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; + line-height: 2em !important; + font-family: 'Noto Serif', 'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; color:#4a5d75 !important; - margin-top:3em !important; - margin-bottom:1em !important; - line-height:1.6em !important; + margin-top: 2em !important; + margin-bottom: 0.5em !important; background-position: bottom !important; background-repeat: repeat-x !important; background-size: 75px; @@ -69,19 +81,28 @@ h2{ height: 60px !important; } h3 { + font-weight: bold !important; text-decoration: none !important; color:#4a5d75 !important; - font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; + font-family: 'Noto Serif', 'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; font-size: 1.4em !important; - margin-top:1em !important; - margin-bottom:.5em !important; + margin-top: 1.4em !important; + margin-bottom: 1.0em !important; line-height:1.0125em !important } -h4,h5,h6{ +h4{ +font-weight: bold !important; + font-size:1.125em !important; + text-decoration: none !important; + color:#4a5d75 !important; + font-family: 'Noto Serif', 'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important + margin-top: 1em !important; + margin-bottom: 0.8em !important; +}h4,h5,h6{ font-size:1.125em !important; text-decoration: none !important; color:#4a5d75 !important; - font-family:'Lucida Grande', 
Geneva, Verdana, Arial, sans-serif !important + font-family: 'Noto Serif', 'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important } .admonitionblock{ background-color: antiquewhite !important; @@ -191,7 +212,7 @@ ul { padding-top: .5em; font-weight: lighter !important; color:#4a5d75 !important; - font-family:'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; + font-family: 'Noto Serif', 'Lucida Grande', Geneva, Verdana, Arial, sans-serif !important; font-size: 1em !important; } em,i{ diff --git a/documentation/src/main/style/asciidoctor/js/toc.js b/documentation/src/main/style/asciidoctor/js/toc.js index ae20217d4fc0..ec434553b538 100644 --- a/documentation/src/main/style/asciidoctor/js/toc.js +++ b/documentation/src/main/style/asciidoctor/js/toc.js @@ -1,5 +1,7 @@ var versions = { 'current' : '/current/userguide/html_single/Hibernate_User_Guide.html', + '6.1' : '/6.1/userguide/html_single/Hibernate_User_Guide.html', + '6.0' : '/6.0/userguide/html_single/Hibernate_User_Guide.html', '5.6' : '/5.6/userguide/html_single/Hibernate_User_Guide.html', '5.5' : '/5.5/userguide/html_single/Hibernate_User_Guide.html', '5.4' : '/5.4/userguide/html_single/Hibernate_User_Guide.html', diff --git a/documentation/src/main/style/pdf/fonts/Inconsolata-Light.ttf b/documentation/src/main/style/pdf/fonts/Inconsolata-Light.ttf new file mode 100644 index 000000000000..ba3a83d5e983 Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/Inconsolata-Light.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/NotoEmoji.ttf b/documentation/src/main/style/pdf/fonts/NotoEmoji.ttf new file mode 100644 index 000000000000..b8aa818517fc Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/NotoEmoji.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/OpenSansEmoji.ttf b/documentation/src/main/style/pdf/fonts/OpenSansEmoji.ttf new file mode 100644 index 000000000000..57d86a62bbc9 Binary files /dev/null and 
b/documentation/src/main/style/pdf/fonts/OpenSansEmoji.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-Black.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-Black.ttf new file mode 100644 index 000000000000..9acf5854fc71 Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-Black.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-BlackItalic.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-BlackItalic.ttf new file mode 100644 index 000000000000..4bd30ff9ff17 Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-BlackItalic.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-Bold.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-Bold.ttf new file mode 100644 index 000000000000..388869cdd74e Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-Bold.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-BoldItalic.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-BoldItalic.ttf new file mode 100644 index 000000000000..2e10a398d2ea Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-BoldItalic.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-ExtraLight.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-ExtraLight.ttf new file mode 100644 index 000000000000..3ba2f35bfc70 Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-ExtraLight.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-ExtraLightItalic.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-ExtraLightItalic.ttf new file mode 100644 index 000000000000..b814440ffdec Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-ExtraLightItalic.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-Italic.ttf 
b/documentation/src/main/style/pdf/fonts/SourceSansPro-Italic.ttf new file mode 100644 index 000000000000..01223b86e268 Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-Italic.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-Light.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-Light.ttf new file mode 100644 index 000000000000..e9008d329ffb Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-Light.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-LightItalic.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-LightItalic.ttf new file mode 100644 index 000000000000..a9a32e4273c1 Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-LightItalic.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-Regular.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-Regular.ttf new file mode 100644 index 000000000000..5447a5ff93de Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-Regular.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-SemiBold.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-SemiBold.ttf new file mode 100644 index 000000000000..9f8a34539834 Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-SemiBold.ttf differ diff --git a/documentation/src/main/style/pdf/fonts/SourceSansPro-SemiBoldItalic.ttf b/documentation/src/main/style/pdf/fonts/SourceSansPro-SemiBoldItalic.ttf new file mode 100644 index 000000000000..65a76bfdf39d Binary files /dev/null and b/documentation/src/main/style/pdf/fonts/SourceSansPro-SemiBoldItalic.ttf differ diff --git a/documentation/src/main/style/pdf/theme.yml b/documentation/src/main/style/pdf/theme.yml new file mode 100644 index 000000000000..987c5de1b89b --- /dev/null +++ b/documentation/src/main/style/pdf/theme.yml @@ -0,0 +1,122 @@ +extends: 
base +page: + margin: [30,50,30,50] +font: + catalog: + merge: false + Source Sans Pro: + normal: SourceSansPro-Regular.ttf + italic: SourceSansPro-Italic.ttf + bold: SourceSansPro-Bold.ttf + bold_italic: SourceSansPro-BoldItalic.ttf + Source Sans Pro Light: + normal: SourceSansPro-Light.ttf + italic: SourceSansPro-LightItalic.ttf + bold: SourceSansPro-SemiBold.ttf + bold_italic: SourceSansPro-SemiBoldItalic.ttf + Inconsolata Light: + normal: Inconsolata-Light.ttf + bold: Inconsolata-Light.ttf + italic: Inconsolata-Light.ttf + OpenSansEmoji: + normal: OpenSansEmoji.ttf + bold: OpenSansEmoji.ttf + italic: OpenSansEmoji.ttf + bold_italic: OpenSansEmoji.ttf + NotoEmoji: + normal: NotoEmoji.ttf + bold: NotoEmoji.ttf + italic: NotoEmoji.ttf + bold_italic: NotoEmoji.ttf + fallbacks: + - OpenSansEmoji + - NotoEmoji + - Source Sans Pro +base: + font: + color: #151e3d + family: Source Sans Pro + size: 9 + line-height-length: 11.5 + line-height: $base-line-height-length / $base-font-size +prose: + margin-bottom: 8 +image: + width: 70% + align: center +codespan: + font: + size: 0.94em + family: Inconsolata Light + color: #281e5d +code: + font: + size: 0.94em + color: #281e5d + family: Inconsolata Light + border-width: 0 + padding: [4,4,4,20] +# background-color: #f7f7f7 +sidebar: + border-width: 0 + title: + align: center +admonition: + label: + vertical-align: top + padding: [4, 8, 4, 8] + column-rule: + style: solid + width: 3 + color: #f0f0f0 + icon: + tip: + stroke-color: #FFC300 + warning: + stroke-color: #FF5733 + caution: + stroke-color: #FF5733 +heading: + font: + color: #b22222 + size: 11 + style: bold + line-height: 1.2 + h2-font-size: $base-font-size * 1.3 + h3-font-size: $base-font-size * 1.2 + h4-font-size: $base-font-size * 1.1 + h5-font-size: $base-font-size * 1.0 + margin-bottom: $base-line-height-length +link: + font-color: #002FA7 +list: + indent: $base-font-size * 1.5 + item-spacing: 2 +table: + font-size: 0.94em + caption: + text-align: center + side: top 
+ font-size: 0.94em + grid: + color: #f0f0f0 + style: solid + width: 1 + border: + width: 1 + color: #f0f0f0 + head: + background-color: #f0f0f0 + cell: + padding: 6 +footer: + border-width: 0 +quote: + font-style: italic + font-color: #b22222 + font-size: 1.1em +# background-color: #f1f1f1 + border-color: #000000 + border-radius: 2 + border-style: dotted + padding: [10,20,10,25] \ No newline at end of file diff --git a/documentation/src/test/java/org/hibernate/userguide/immutability/EntityImmutabilityTest.java b/documentation/src/test/java/org/hibernate/userguide/immutability/EntityImmutabilityTest.java deleted file mode 100644 index 345ea9b5fa74..000000000000 --- a/documentation/src/test/java/org/hibernate/userguide/immutability/EntityImmutabilityTest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. - * See the lgpl.txt file in the root directory or . - */ -package org.hibernate.userguide.immutability; - -import java.util.Date; -import jakarta.persistence.Entity; -import jakarta.persistence.Id; - -import org.hibernate.annotations.Immutable; -import org.hibernate.orm.test.jpa.BaseEntityManagerFunctionalTestCase; - -import org.junit.Test; - -import static org.hibernate.testing.transaction.TransactionUtil.doInJPA; -import static org.junit.Assert.assertEquals; - -/** - * @author Vlad Mihalcea - */ -public class EntityImmutabilityTest extends BaseEntityManagerFunctionalTestCase { - - @Override - protected Class[] getAnnotatedClasses() { - return new Class[] { - Event.class - }; - } - - @Test - public void test() { - //tag::entity-immutability-persist-example[] - doInJPA(this::entityManagerFactory, entityManager -> { - Event event = new Event(); - event.setId(1L); - event.setCreatedOn(new Date()); - event.setMessage("Hibernate User Guide rocks!"); - - entityManager.persist(event); - }); - //end::entity-immutability-persist-example[] - 
//tag::entity-immutability-update-example[] - doInJPA(this::entityManagerFactory, entityManager -> { - Event event = entityManager.find(Event.class, 1L); - log.info("Change event message"); - event.setMessage("Hibernate User Guide"); - }); - doInJPA(this::entityManagerFactory, entityManager -> { - Event event = entityManager.find(Event.class, 1L); - assertEquals("Hibernate User Guide rocks!", event.getMessage()); - }); - //end::entity-immutability-update-example[] - } - - //tag::entity-immutability-example[] - @Entity(name = "Event") - @Immutable - public static class Event { - - @Id - private Long id; - - private Date createdOn; - - private String message; - - //Getters and setters are omitted for brevity - - //end::entity-immutability-example[] - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Date getCreatedOn() { - return createdOn; - } - - public void setCreatedOn(Date createdOn) { - this.createdOn = createdOn; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - //tag::entity-immutability-example[] - } - //end::entity-immutability-example[] -} diff --git a/documentation/src/test/java/org/hibernate/userguide/mapping/basic/BasicCollectionMappingTests.java b/documentation/src/test/java/org/hibernate/userguide/mapping/basic/BasicCollectionMappingTests.java deleted file mode 100644 index a57413ae9ac6..000000000000 --- a/documentation/src/test/java/org/hibernate/userguide/mapping/basic/BasicCollectionMappingTests.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later - * See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html - */ -package org.hibernate.userguide.mapping.basic; - -import java.util.List; -import java.util.Set; -import java.util.SortedSet; -import 
java.util.TreeSet; - -import org.hibernate.metamodel.mapping.JdbcMapping; -import org.hibernate.metamodel.mapping.internal.BasicAttributeMapping; -import org.hibernate.metamodel.spi.MappingMetamodelImplementor; -import org.hibernate.persister.entity.EntityPersister; - -import org.hibernate.testing.orm.junit.DomainModel; -import org.hibernate.testing.orm.junit.SessionFactory; -import org.hibernate.testing.orm.junit.SessionFactoryScope; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Test; - -import jakarta.persistence.Entity; -import jakarta.persistence.Id; -import jakarta.persistence.Table; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.equalTo; - -/** - * Tests for mapping basic collections - */ -@DomainModel(annotatedClasses = BasicCollectionMappingTests.EntityOfCollections.class) -@SessionFactory -public class BasicCollectionMappingTests { - - @Test - public void testMappings(SessionFactoryScope scope) { - // first, verify the type selections... 
- final MappingMetamodelImplementor mappingMetamodel = scope.getSessionFactory() - .getRuntimeMetamodels() - .getMappingMetamodel(); - final EntityPersister entityDescriptor = mappingMetamodel.findEntityDescriptor( EntityOfCollections.class); - - { - final BasicAttributeMapping attribute = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("list"); - assertThat( attribute.getJavaType().getJavaTypeClass(), equalTo( List.class)); - - final JdbcMapping jdbcMapping = attribute.getJdbcMapping(); - assertThat(jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo(List.class)); - } - - { - final BasicAttributeMapping attribute = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("sortedSet"); - assertThat( attribute.getJavaType().getJavaTypeClass(), equalTo( SortedSet.class)); - - final JdbcMapping jdbcMapping = attribute.getJdbcMapping(); - assertThat(jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo(SortedSet.class)); - } - - - // and try to use the mapping - scope.inTransaction( - (session) -> session.persist( - new EntityOfCollections( - 1, - List.of( (short) 3 ), - new TreeSet<>( Set.of( (short) 5 ) ) - ) - ) - ); - scope.inTransaction( - (session) -> session.get( EntityOfCollections.class, 1) - ); - } - - @AfterEach - public void dropData(SessionFactoryScope scope) { - scope.inTransaction( - (session) -> session.createMutationQuery("delete EntityOfCollections").executeUpdate() - ); - } - - @Entity(name = "EntityOfCollections") - @Table(name = "EntityOfCollections") - public static class EntityOfCollections { - @Id - Integer id; - - //tag::basic-collection-example[] - List list; - SortedSet sortedSet; - //end::basic-collection-example[] - - public EntityOfCollections() { - } - - public EntityOfCollections(Integer id, List list, SortedSet sortedSet) { - this.id = id; - this.list = list; - this.sortedSet = sortedSet; - } - } -} diff --git 
a/documentation/src/test/java/org/hibernate/userguide/mapping/basic/BooleanMappingTests.java b/documentation/src/test/java/org/hibernate/userguide/mapping/basic/BooleanMappingTests.java deleted file mode 100644 index 620508fc509f..000000000000 --- a/documentation/src/test/java/org/hibernate/userguide/mapping/basic/BooleanMappingTests.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later - * See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html - */ -package org.hibernate.userguide.mapping.basic; - -import java.sql.Types; -import jakarta.persistence.Basic; -import jakarta.persistence.Convert; -import jakarta.persistence.Entity; -import jakarta.persistence.Id; -import jakarta.persistence.Table; - -import org.hibernate.metamodel.mapping.JdbcMapping; -import org.hibernate.metamodel.mapping.internal.BasicAttributeMapping; -import org.hibernate.metamodel.spi.MappingMetamodelImplementor; -import org.hibernate.persister.entity.EntityPersister; -import org.hibernate.type.internal.ConvertedBasicTypeImpl; - -import org.hibernate.testing.orm.junit.DomainModel; -import org.hibernate.testing.orm.junit.SessionFactory; -import org.hibernate.testing.orm.junit.SessionFactoryScope; -import org.junit.jupiter.api.Test; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.isOneOf; - -/** - * Tests for mapping boolean values - * - * @author Steve Ebersole - */ -@DomainModel(annotatedClasses = BooleanMappingTests.EntityOfBooleans.class) -@SessionFactory -public class BooleanMappingTests { - @Test - public void verifyMappings(SessionFactoryScope scope) { - final MappingMetamodelImplementor mappingMetamodel = scope.getSessionFactory() - .getRuntimeMetamodels() - .getMappingMetamodel(); - final 
EntityPersister entityDescriptor = mappingMetamodel.getEntityDescriptor(EntityOfBooleans.class); - - { - final BasicAttributeMapping implicit = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("implicit"); - final JdbcMapping jdbcMapping = implicit.getJdbcMapping(); - assertThat(jdbcMapping.getJavaTypeDescriptor().getJavaType(), equalTo(Boolean.class)); - assertThat( - jdbcMapping.getJdbcType().getJdbcTypeCode(), - // the implicit mapping will depend on the Dialect - isOneOf( Types.BOOLEAN, Types.BIT, Types.TINYINT, Types.SMALLINT ) - ); - } - - - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // Converters - - { - final BasicAttributeMapping convertedYesNo = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("convertedYesNo"); - final JdbcMapping jdbcMapping = convertedYesNo.getJdbcMapping(); - assertThat( jdbcMapping, instanceOf( ConvertedBasicTypeImpl.class ) ); - assertThat( jdbcMapping.getJdbcJavaType().getJavaType(), equalTo( Character.class ) ); - assertThat( - jdbcMapping.getJdbcType().getJdbcTypeCode(), - // could be NCHAR if nationalization is globally enabled - isOneOf( Types.CHAR, Types.NCHAR ) - ); - } - - { - final BasicAttributeMapping convertedTrueFalse = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("convertedTrueFalse"); - final JdbcMapping jdbcMapping = convertedTrueFalse.getJdbcMapping(); - assertThat( jdbcMapping, instanceOf( ConvertedBasicTypeImpl.class ) ); - assertThat( jdbcMapping.getJdbcJavaType().getJavaType(), equalTo( Character.class ) ); - assertThat( - jdbcMapping.getJdbcType().getJdbcTypeCode(), - // could be NCHAR if nationalization is globally enabled - isOneOf( Types.CHAR, Types.NCHAR ) - ); - } - - { - final BasicAttributeMapping convertedNumeric = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("convertedNumeric"); - final JdbcMapping jdbcMapping = convertedNumeric.getJdbcMapping(); - assertThat( jdbcMapping, instanceOf( 
ConvertedBasicTypeImpl.class ) ); - assertThat( jdbcMapping.getJdbcJavaType().getJavaType(), equalTo( Integer.class ) ); - assertThat( - jdbcMapping.getJdbcType().getJdbcTypeCode(), - equalTo( Types.INTEGER ) - ); - } - - - } - - @Entity(name = "EntityOfBooleans") - @Table(name = "EntityOfBooleans") - public static class EntityOfBooleans { - @Id - Integer id; - - //tag::basic-boolean-example-implicit[] - // this will be mapped to BIT or BOOLEAN on the database - @Basic - boolean implicit; - //end::basic-boolean-example-implicit[] - - - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // converted - - //tag::basic-boolean-example-explicit-yes-no[] - // this will get mapped to CHAR or NCHAR with a conversion - @Basic - @Convert(converter = org.hibernate.type.YesNoConverter.class) - boolean convertedYesNo; - //end::basic-boolean-example-explicit-yes-no[] - - //tag::basic-boolean-example-explicit-t-f[] - // this will get mapped to CHAR or NCHAR with a conversion - @Basic - @Convert(converter = org.hibernate.type.TrueFalseConverter.class) - boolean convertedTrueFalse; - //end::basic-boolean-example-explicit-t-f[] - - //tag::basic-boolean-example-explicit-numeric[] - // this will get mapped to TINYINT with a conversion - @Basic - @Convert(converter = org.hibernate.type.NumericBooleanConverter.class) - boolean convertedNumeric; - //end::basic-boolean-example-explicit-numeric[] - } -} diff --git a/documentation/src/test/java/org/hibernate/userguide/osgi/_native/HibernateUtil.java b/documentation/src/test/java/org/hibernate/userguide/osgi/_native/HibernateUtil.java deleted file mode 100644 index 474964a61c91..000000000000 --- a/documentation/src/test/java/org/hibernate/userguide/osgi/_native/HibernateUtil.java +++ /dev/null @@ -1,35 +0,0 @@ -package org.hibernate.userguide.osgi._native; - -import org.hibernate.Session; -import org.hibernate.SessionFactory; - -import org.osgi.framework.Bundle; -import org.osgi.framework.BundleContext; -import 
org.osgi.framework.FrameworkUtil; -import org.osgi.framework.ServiceReference; - -//tag::osgi-discover-SessionFactory[] -public class HibernateUtil { - - private SessionFactory sf; - - public Session getSession() { - return getSessionFactory().openSession(); - } - - private SessionFactory getSessionFactory() { - if (sf == null) { - Bundle thisBundle = FrameworkUtil.getBundle( - HibernateUtil.class - ); - BundleContext context = thisBundle.getBundleContext(); - - ServiceReference sr = context.getServiceReference( - SessionFactory.class.getName() - ); - sf = (SessionFactory) context.getService(sr); - } - return sf; - } -} -//end::osgi-discover-SessionFactory[] \ No newline at end of file diff --git a/documentation/src/test/java/org/hibernate/userguide/osgi/jpa/HibernateUtil.java b/documentation/src/test/java/org/hibernate/userguide/osgi/jpa/HibernateUtil.java deleted file mode 100644 index e5b3211ec02f..000000000000 --- a/documentation/src/test/java/org/hibernate/userguide/osgi/jpa/HibernateUtil.java +++ /dev/null @@ -1,44 +0,0 @@ -package org.hibernate.userguide.osgi.jpa; - -import jakarta.persistence.EntityManager; -import jakarta.persistence.EntityManagerFactory; -import jakarta.persistence.spi.PersistenceProvider; - -import org.osgi.framework.Bundle; -import org.osgi.framework.BundleContext; -import org.osgi.framework.FrameworkUtil; -import org.osgi.framework.ServiceReference; - -//tag::osgi-discover-EntityManagerFactory[] -public class HibernateUtil { - - private EntityManagerFactory emf; - - public EntityManager getEntityManager() { - return getEntityManagerFactory().createEntityManager(); - } - - private EntityManagerFactory getEntityManagerFactory() { - if (emf == null) { - Bundle thisBundle = FrameworkUtil.getBundle( - HibernateUtil.class - ); - BundleContext context = thisBundle.getBundleContext(); - - ServiceReference serviceReference = context.getServiceReference( - PersistenceProvider.class.getName() - ); - PersistenceProvider persistenceProvider = 
(PersistenceProvider) context - .getService( - serviceReference - ); - - emf = persistenceProvider.createEntityManagerFactory( - "YourPersistenceUnitName", - null - ); - } - return emf; - } -} -//end::osgi-discover-EntityManagerFactory[] diff --git a/documentation/src/test/resources/hibernate.properties b/documentation/src/test/resources/hibernate.properties deleted file mode 100644 index 968847a8f3f7..000000000000 --- a/documentation/src/test/resources/hibernate.properties +++ /dev/null @@ -1,25 +0,0 @@ -# -# Hibernate, Relational Persistence for Idiomatic Java -# -# License: GNU Lesser General Public License (LGPL), version 2.1 or later. -# See the lgpl.txt file in the root directory or . -# - -hibernate.dialect @db.dialect@ -hibernate.connection.driver_class @jdbc.driver@ -hibernate.connection.url @jdbc.url@ -hibernate.connection.username @jdbc.user@ -hibernate.connection.password @jdbc.pass@ -hibernate.connection.init_sql @connection.init_sql@ - -hibernate.connection.pool_size 5 - -hibernate.format_sql true -hibernate.max_fetch_depth 5 - -hibernate.cache.region_prefix hibernate.test -hibernate.cache.region.factory_class org.hibernate.testing.cache.CachingRegionFactory - -hibernate.jdbc.batch_size 0 - -hibernate.service.allow_crawling=false diff --git a/documentation/src/test/resources/log4j2.properties b/documentation/src/test/resources/log4j2.properties deleted file mode 100644 index 589c854e62e3..000000000000 --- a/documentation/src/test/resources/log4j2.properties +++ /dev/null @@ -1,94 +0,0 @@ -# -# Hibernate, Relational Persistence for Idiomatic Java -# -# License: GNU Lesser General Public License (LGPL), version 2.1 or later. -# See the lgpl.txt file in the root directory or . 
-# -appender.stdout.type=Console -appender.stdout.name=STDOUT -appender.stdout.layout.type=PatternLayout -appender.stdout.layout.pattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n - - -appender.subsystem.name=subsystem -appender.subsystem.type=Console -appender.subsystem.layout.type=PatternLayout -appender.subsystem.layout.pattern=[subsystem] %5p %15.25c{5} %C{1}:%L - %m%n - -logger.subsystem-root.name=org.hibernate.orm -logger.subsystem-root.level=info -logger.subsystem-root.additivity=false -logger.subsystem-root.appenderRef.subsystem.ref=subsystem - -logger.jdbc-bind.name=org.hibernate.orm.jdbc.bind -logger.jdbc-bind.level=trace - -logger.jdbc-extract.name=org.hibernate.orm.jdbc.extract -logger.jdbc-extract.level=trace - - - -rootLogger.level=info -rootLogger.appenderRef.stdout.ref=STDOUT - -logger.hibernate.name=org.hibernate -logger.hibernate.level=info -#logger.hibernate.level=warn - -logger.ejb.name=org.hibernate.ejb -logger.ejb.level=info -logger.ejb-packaging.name=org.hibernate.ejb.packaging -logger.ejb-packaging.level=info -logger.reflection.name=org.hibernate.reflection -logger.reflection.level=info - -logger.cascades.name=org.hibernate.engine.Cascades -#logger.cascades.level=warn - -### log just the SQL -logger.sql.name=org.hibernate.SQL -logger.sql.level=debug - -### log JDBC bind parameters ### -logger.hibernate-type.name=org.hibernate.type -logger.hibernate-type.level=trace -logger.type-sql.name=org.hibernate.type.descriptor.jdbc -logger.type-sql.level=trace -logger.table-generator.name=org.hibernate.id.enhanced.TableGenerator -logger.table-generator.level=trace -logger.identifier-generator-helper.name=org.hibernate.id.IdentifierGeneratorHelper -logger.identifier-generator-helper.level=trace -logger.abstract-entity-persister.name=org.hibernate.persister.entity.AbstractEntityPersister -logger.abstract-entity-persister.level=trace -logger.entity-reference-initializer-impl.name=org.hibernate.loader.plan.exec.process.internal.EntityReferenceInitializerImpl 
-logger.entity-reference-initializer-impl.level=trace - -### log schema export/update ### -logger.hbm2ddl.name=org.hibernate.tool.hbm2ddl -logger.hbm2ddl.level=info - -### log HQL parse trees -logger.hql.name=org.hibernate.hql -#logger.hql.level=warn - -### log cache activity ### -logger.cache.name=org.hibernate.cache -#logger.cache.level=warn - -### log JDBC resource acquisition -logger.hibernate-jdbc.name=org.hibernate.jdbc -#logger.hibernate-jdbc.level=warn - -### enable the following line if you want to track down connection ### -### leakages when using DriverManagerConnectionProvider ### -logger.driver-manager-connection-provider.name=org.hibernate.connection.DriverManagerConnectionProvider -#logger.driver-manager-connection-provider.level=trace - -### When entity copy merge functionality is enabled using: -### hibernate.event.merge.entity_copy_observer=log, the following will -### provide information about merged entity copies. -logger.entity-copy-allowed-logged-observer.name=org.hibernate.event.internal.EntityCopyAllowedLoggedObserver -#logger.entity-copy-allowed-logged-observer.level=warn - -logger.userguide.name=org.hibernate.userguide -logger.userguide.level=debug diff --git a/documentation/status.md b/documentation/status.md deleted file mode 100644 index f7f2b0ec68ca..000000000000 --- a/documentation/status.md +++ /dev/null @@ -1,83 +0,0 @@ -Status of the documentation overhaul (5.0 version) -================================================== - -Overall the plan is to define 3 DocBook-based guides. The intention is for this document to serve -as an outline of the work and a status of what still needs done. - -NOTE : entries marked with strike-through indicate that the content is believed to be done; review -would still be appreciated. - - -User Guide -========== - -Covers reference topics targeting users. 
- -* Prefix -* Architecture -* DomainModel -* Bootstrap -* PersistenceContext -* Database_Access -* Transactions -* JNDI -* Fetching - still need to document batch fetching, subselect fetching, extra laziness and EntityGraphs -* Flushing (to be written) -* Cascading (needs lots of work) -* Locking (needs some work) -* Batching (needs lot of work - not started - open questions) -* Caching (needs some work) -* Events (need some work) -* Query - HQL/JPQL -* Query - Criteria -* Query - Native (copy from old) -* Multi_Tenancy (needs some work) -* OSGi (right place for this?) -* Envers (right place for this?) -* Portability (needs some work) - - -Domain Model Mapping Guide -=========================== - -Covers mapping domain model to database. Note that a lot of the "not started" content exists elsewhere; its merely a -matter of pulling that content in and better organizing it. - - -* Prefix -* Data_Categorizations -* Entity (needs some work) -* Basic_Types -* Composition -* Collection (needs some work) -* Identifiers (mostly done - needs "derived id" stuff documented) -* Natural_Id -* Secondary_Tables (not started) - logically a joined in-line view -* Associations (not started) -* Attribute_Access (not started) -* Mapping_Overrides - AttributeOverrides/AssociationOverrides (not started) -* Generated_attributes (not started) -* "columns, formulas, read/write-fragments" (not started) -* Naming_Strategies - implicit, physical, quoting (not started) -* Database_Constraints - pk, fk, uk, check, etc (not started) -* Auxiliary_DB_Objects - does this belong here? or somewhere else (integrations guide) discussing schema tooling? - - -Integrations Guide -=================== - -* Services&Registries (pretty much done) -* IdGeneratorStrategyInterpreter (not started) -* custom Session/SessionFactory implementors (not started) -* ??? - - -Overall -======= - -* I really like the idea of each chapter having a title+abstract. See userGuide/chapters/HQL.xml - for an example. 
-* I really like the idea of each chapter having a "Related Topics" (?)sidebar(?). See - userGuide/chapters/HQL.xml for an example. I am not sure `` is the best element for - this concept, but I could not find a better one on cursory glance. I noticed `literallayout` used in - a few DocBook examples for something similar. diff --git a/drivers/.gitignore b/drivers/.gitignore new file mode 100644 index 000000000000..01b7e33fd470 --- /dev/null +++ b/drivers/.gitignore @@ -0,0 +1 @@ +**/* \ No newline at end of file diff --git a/drivers/README.adoc b/drivers/README.adoc new file mode 100644 index 000000000000..bd29262c7f0d --- /dev/null +++ b/drivers/README.adoc @@ -0,0 +1,4 @@ +== Extra JDBC Drivers + +This directory is a place to drop JDBC drivers (or any test related jars) to be added to the project's +`testRuntimeOnly` class-path. \ No newline at end of file diff --git a/edb/edb15.Dockerfile b/edb/edb15.Dockerfile new file mode 100644 index 000000000000..78a32e13fe37 --- /dev/null +++ b/edb/edb15.Dockerfile @@ -0,0 +1,48 @@ +FROM quay.io/enterprisedb/edb-postgres-advanced:15.4-3.3-postgis +USER root +# this 777 will be replaced by 700 at runtime (allows semi-arbitrary "--user" values) +RUN chown -R postgres:postgres /var/lib/edb && chmod 777 /var/lib/edb && rm /docker-entrypoint-initdb.d/10_postgis.sh + +USER postgres +ENV LANG en_US.utf8 +ENV PG_MAJOR 15 +ENV PG_VERSION 15 +ENV PGPORT 5444 +ENV PGDATA /var/lib/edb/as$PG_MAJOR/data/ +VOLUME /var/lib/edb/as$PG_MAJOR/data/ + +COPY docker-entrypoint.sh /usr/local/bin/ +ENTRYPOINT ["docker-entrypoint.sh"] + +# We set the default STOPSIGNAL to SIGINT, which corresponds to what PostgreSQL +# calls "Fast Shutdown mode" wherein new connections are disallowed and any +# in-progress transactions are aborted, allowing PostgreSQL to stop cleanly and +# flush tables to disk, which is the best compromise available to avoid data +# corruption. 
+# +# Users who know their applications do not keep open long-lived idle connections +# may way to use a value of SIGTERM instead, which corresponds to "Smart +# Shutdown mode" in which any existing sessions are allowed to finish and the +# server stops when all sessions are terminated. +# +# See https://www.postgresql.org/docs/12/server-shutdown.html for more details +# about available PostgreSQL server shutdown signals. +# +# See also https://www.postgresql.org/docs/12/server-start.html for further +# justification of this as the default value, namely that the example (and +# shipped) systemd service files use the "Fast Shutdown mode" for service +# termination. +# +STOPSIGNAL SIGINT +# +# An additional setting that is recommended for all users regardless of this +# value is the runtime "--stop-timeout" (or your orchestrator/runtime's +# equivalent) for controlling how long to wait between sending the defined +# STOPSIGNAL and sending SIGKILL (which is likely to cause data corruption). +# +# The default in most runtimes (such as Docker) is 10 seconds, and the +# documentation at https://www.postgresql.org/docs/12/server-start.html notes +# that even 90 seconds may not be long enough in many instances. + +EXPOSE 5444 +CMD ["postgres"] \ No newline at end of file diff --git a/gradle.properties b/gradle.properties index 23147c47a67c..3d4d5a495eaf 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,8 +1,10 @@ # Keep all these properties in sync unless you know what you are doing! 
-org.gradle.jvmargs=-Xmx2g -XX:MaxMetaspaceSize=256m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 -toolchain.compiler.jvmargs=-Xmx2g -XX:MaxMetaspaceSize=256m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 -toolchain.javadoc.jvmargs=-Xmx2g -XX:MaxMetaspaceSize=256m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 -toolchain.launcher.jvmargs=-Xmx2g -XX:MaxMetaspaceSize=384m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 +# We set '-Dlog4j2.disableJmx=true' to prevent classloader leaks triggered by the logger. +# (Some of these settings need to be repeated in the test.jvmArgs blocks of each module) +org.gradle.jvmargs=-Dlog4j2.disableJmx -Xmx2g -XX:MaxMetaspaceSize=448m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 +toolchain.compiler.jvmargs=-Dlog4j2.disableJmx=true -Xmx2g -XX:MaxMetaspaceSize=256m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 +toolchain.javadoc.jvmargs=-Dlog4j2.disableJmx=true -Xmx2g -XX:MaxMetaspaceSize=256m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 +toolchain.launcher.jvmargs=-Dlog4j2.disableJmx=true -Xmx2g -XX:MaxMetaspaceSize=384m -XX:+HeapDumpOnOutOfMemoryError -Duser.language=en -Duser.country=US -Duser.timezone=UTC -Dfile.encoding=UTF-8 org.gradle.parallel=true diff --git a/gradle/databases.gradle b/gradle/databases.gradle index 0f14b01bc086..11bc29bf0220 100644 --- a/gradle/databases.gradle +++ b/gradle/databases.gradle @@ -12,14 +12,16 @@ ext { db = project.hasProperty('db') ? 
project.getProperty('db') : 'h2' dbHost = System.getProperty( 'dbHost', 'localhost' ) dbService = System.getProperty( 'dbService', '' ) + runID = System.getProperty( 'runID', '' ) dbBundle = [ h2 : [ 'db.dialect' : 'org.hibernate.dialect.H2Dialect', 'jdbc.driver': 'org.h2.Driver', 'jdbc.user' : 'sa', 'jdbc.pass' : '', - 'jdbc.url' : 'jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1;LOCK_TIMEOUT=10000', - 'connection.init_sql' : '' + 'jdbc.url' : 'jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1;LOCK_TIMEOUT=10000;DB_CLOSE_ON_EXIT=FALSE', + 'connection.init_sql' : '', + 'hibernate.dialect.native_param_markers' : 'true' ], hsqldb : [ 'db.dialect' : 'org.hibernate.dialect.HSQLDialect', @@ -34,7 +36,7 @@ ext { 'jdbc.driver': 'org.apache.derby.iapi.jdbc.AutoloadedDriver', 'jdbc.user' : 'hibernate_orm_test', 'jdbc.pass' : 'hibernate_orm_test', - 'jdbc.url' : 'jdbc:derby:target/tmp/derby/hibernate_orm_test;databaseName=hibernate_orm_test;create=true', + 'jdbc.url' : 'jdbc:derby:memory:;databaseName=hibernate_orm_test;create=true', 'connection.init_sql' : '' ], derby_old : [ @@ -42,7 +44,7 @@ ext { 'jdbc.driver': 'org.apache.derby.jdbc.EmbeddedDriver', 'jdbc.user' : 'hibernate_orm_test', 'jdbc.pass' : 'hibernate_orm_test', - 'jdbc.url' : 'jdbc:derby:target/tmp/derby/hibernate_orm_test;databaseName=hibernate_orm_test;create=true', + 'jdbc.url' : 'jdbc:derby:memory:;databaseName=hibernate_orm_test;create=true', 'connection.init_sql' : '' ], pgsql : [ @@ -78,9 +80,18 @@ ext { 'jdbc.user' : 'hibernate_orm_test', 'jdbc.pass' : 'hibernate_orm_test', // Disable prepared statement caching to avoid issues with changing schemas - 'jdbc.url' : 'jdbc:jtds:sybase://' + dbHost + ':5000/hibernate_orm_test;maxStatements=0;cacheMetaData=false', + 'jdbc.url' : 'jdbc:jtds:sybase://' + dbHost + ':9000/hibernate_orm_test;maxStatements=0;cacheMetaData=false', 'connection.init_sql' : 'set ansinull on' ], + sybase_jconn_ci : [ + 'db.dialect' : 'org.hibernate.dialect.SybaseASEDialect', + 'jdbc.driver': 
'com.sybase.jdbc4.jdbc.SybDriver', + 'jdbc.user' : 'hibernate_orm_test', + 'jdbc.pass' : 'hibernate_orm_test', + // Disable prepared statement caching to avoid issues with changing schemas + 'jdbc.url' : 'jdbc:sybase:Tds:' + dbHost + ':9000/hibernate_orm_test', + 'connection.init_sql' : 'set ansinull on set quoted_identifier on' + ], mysql : [ 'db.dialect' : 'org.hibernate.dialect.MySQLDialect', 'jdbc.driver': 'com.mysql.cj.jdbc.Driver', @@ -118,7 +129,7 @@ ext { 'jdbc.driver': 'com.mysql.jdbc.Driver', 'jdbc.user' : 'hibernate_orm_test', 'jdbc.pass' : 'hibernate_orm_test', - 'jdbc.url' : 'jdbc:mysql://' + dbHost + '/hibernate_orm_test', + 'jdbc.url' : 'jdbc:mysql://' + dbHost + ':4000/hibernate_orm_test', 'connection.init_sql' : '' ], oracle : [ @@ -130,6 +141,14 @@ ext { 'connection.init_sql' : '' ], oracle_ci : [ + 'db.dialect' : 'org.hibernate.dialect.OracleDialect', + 'jdbc.driver': 'oracle.jdbc.OracleDriver', + 'jdbc.user' : 'hibernate_orm_test', + 'jdbc.pass' : 'hibernate_orm_test', + 'jdbc.url' : 'jdbc:oracle:thin:@' + dbHost + ':1521/freepdb1', + 'connection.init_sql' : '' + ], + oracle_xe_ci : [ 'db.dialect' : 'org.hibernate.dialect.OracleDialect', 'jdbc.driver': 'oracle.jdbc.OracleDriver', 'jdbc.user' : 'hibernate_orm_test', @@ -149,13 +168,62 @@ ext { oracle_cloud_autonomous_tls : [ 'db.dialect' : 'org.hibernate.dialect.OracleDialect', 'jdbc.driver': 'oracle.jdbc.OracleDriver', - 'jdbc.user' : 'hibernate_orm_test', + 'jdbc.user' : 'hibernate_orm_test_' + runID, + 'jdbc.pass' : 'Oracle_19_Password', + // Requires dbHost (pointing to the right cloud region) AND dbService (unique database name). 
+ // + // To avoid hibernate-spatial tests failure, JVM must be enabled as stated in documentation: + // https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/autonomous-oracle-java.html + 'jdbc.url' : 'jdbc:oracle:thin:@(description=(retry_count=5)(retry_delay=1)(address=(protocol=tcps)(port=1521)(host=' + dbHost + '.oraclecloud.com))(connect_data=(service_name=' + dbService + '_tp.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))?oracle.jdbc.enableQueryResultCache=false&oracle.jdbc.thinForceDNSLoadBalancing=true&tcp.nodelay=yes', + 'connection.init_sql' : '' + ], + oracle_cloud_autonomous : [ + 'db.dialect' : 'org.hibernate.dialect.OracleDialect', + 'jdbc.driver': 'oracle.jdbc.OracleDriver', + 'jdbc.user' : 'hibernate_orm_test_' + runID, + 'jdbc.pass' : 'Oracle_19_Password', + // Requires dbHost (pointing to the right cloud region) AND dbService (unique database name). + // To avoid hibernate-spatial tests failure, JVM must be enabled as stated in documentation: + // https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/autonomous-oracle-java.html + 'jdbc.url' : 'jdbc:oracle:thin:@(description=(retry_count=5)(retry_delay=1)(address=(protocol=tcp)(port=1521)(host=' + dbHost + '.oraclevcn.com))(connect_data=(service_name=' + dbService + '_tp.adb.oraclecloud.com))(security=(ssl_server_dn_match=no)))?oracle.jdbc.enableQueryResultCache=false&oracle.jdbc.thinForceDNSLoadBalancing=true&tcp.nodelay=yes', + 'jdbc.datasource' : 'oracle.jdbc.OracleDriver', +// 'jdbc.datasource' : 'oracle.jdbc.datasource.impl.OracleDataSource', + 'connection.init_sql' : '' + ], + oracle_cloud_db19c : [ + 'db.dialect' : 'org.hibernate.dialect.OracleDialect', + 'jdbc.driver': 'oracle.jdbc.OracleDriver', + 'jdbc.user' : 'hibernate_orm_test_' + runID, 'jdbc.pass' : 'Oracle_19_Password', // Requires dbHost (pointing to the right cloud region) AND dbService (unique database name). 
// // To avoid hibernate-spatial tests failure, JVM must be enabled as stated in documentation: // https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/autonomous-oracle-java.html - 'jdbc.url' : 'jdbc:oracle:thin:@(description=(retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521)(host=' + dbHost + '.oraclecloud.com))(connect_data=(service_name=' + dbService + '_low.adb.oraclecloud.com))(security=(ssl_server_dn_match=yes)))', + 'jdbc.url' : 'jdbc:oracle:thin:@' + dbHost + ':1521/' + dbService, + 'connection.init_sql' : '' + ], + oracle_cloud_db21c : [ + 'db.dialect' : 'org.hibernate.dialect.OracleDialect', + 'jdbc.driver': 'oracle.jdbc.OracleDriver', + 'jdbc.user' : 'hibernate_orm_test_' + runID, + 'jdbc.pass' : 'Oracle_21_Password', + // Requires dbHost (pointing to the right cloud region) AND dbService (unique database name). + // + // To avoid hibernate-spatial tests failure, JVM must be enabled as stated in documentation: + // https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/autonomous-oracle-java.html + 'jdbc.url' : 'jdbc:oracle:thin:@' + dbHost + ':1521/' + dbService, + 'connection.init_sql' : '' + ], + oracle_cloud_db23c : [ + 'db.dialect' : 'org.hibernate.dialect.OracleDialect', + 'jdbc.driver': 'oracle.jdbc.OracleDriver', + 'jdbc.user' : 'hibernate_orm_test_' + runID, + 'jdbc.pass' : 'Oracle_23_Password', + // Requires dbHost (pointing to the right cloud region) AND dbService (unique database name). 
+ // + // To avoid hibernate-spatial tests failure, JVM must be enabled as stated in documentation: + // https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/autonomous-oracle-java.html + 'jdbc.url' : 'jdbc:oracle:thin:@' + dbHost + ':1521/' + dbService, 'connection.init_sql' : '' ], mssql : [ @@ -247,7 +315,7 @@ if ( processTestResourcesTask != null ) { processTestResourcesTask.inputs.property( 'dbHost', dbHost ) // processTestResourcesTask.inputs.property( "gradle.libs.versions.h2", project.getProperty( "gradle.libs.versions.h2", "2.1.214" ) ) // processTestResourcesTask.inputs.property( "gradle.libs.versions.h2gis", project.getProperty( "gradle.libs.versions.h2gis", "2.1.0" ) ) -// processTestResourcesTask.inputs.property( "gradle.libs.versions.hsqldb", project.getProperty( "gradle.libs.versions.hsqldb", "2.7.1" ) ) +// processTestResourcesTask.inputs.property( "gradle.libs.versions.hsqldb", project.getProperty( "gradle.libs.versions.hsqldb", "2.7.2" ) ) // processTestResourcesTask.inputs.property( "gradle.libs.versions.derby", project.getProperty( "gradle.libs.versions.derby", "10.15.2.0" ) ) processTestResourcesTask.filter( ReplaceTokens, tokens: dbBundle[db] ) -} \ No newline at end of file +} diff --git a/gradle/gradle-enterprise.gradle b/gradle/gradle-enterprise.gradle index 700b0618af9e..f978bb36e574 100644 --- a/gradle/gradle-enterprise.gradle +++ b/gradle/gradle-enterprise.gradle @@ -6,12 +6,13 @@ */ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Applies details for `https://ge.hibernate.org` +// Applies details for `https://develocity.commonhaus.dev` // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ext { isCiEnvironment = isJenkins() || isGitHubActions() || isGenericCi() - populateRemoteBuildCache = getSetting( "POPULATE_REMOTE" ).isPresent() + populateRemoteBuildCache = isEnabled( "POPULATE_REMOTE" ) + useRemoteCache = !isEnabled( "DISABLE_REMOTE_GRADLE_CACHE" ) } private static 
boolean isJenkins() { @@ -36,8 +37,16 @@ static java.util.Optional getSetting(String name) { return java.util.Optional.ofNullable(sysProp); } +static boolean isEnabled(String setting) { + if ( System.getenv().hasProperty( setting ) ) { + return true + } + + return System.hasProperty( setting ) +} + gradleEnterprise { - server = 'https://ge.hibernate.org' + server = 'https://develocity.commonhaus.dev' buildScan { captureTaskInputFiles = true diff --git a/gradle/java-module.gradle b/gradle/java-module.gradle index 7f651fd7ee9d..44cc7754fff2 100644 --- a/gradle/java-module.gradle +++ b/gradle/java-module.gradle @@ -21,15 +21,17 @@ buildscript { import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import org.apache.tools.ant.filters.ReplaceTokens +apply plugin: 'java-library' + apply from: rootProject.file( 'gradle/module.gradle' ) -apply from: rootProject.file( 'gradle/libraries.gradle' ) apply from: rootProject.file( 'gradle/databases.gradle' ) +apply from: rootProject.file( 'gradle/javadoc.gradle' ) -apply plugin: 'org.hibernate.orm.database-service' - -apply plugin: 'java-library' apply plugin: 'biz.aQute.bnd.builder' apply plugin: 'org.hibernate.orm.database-service' +apply plugin: 'org.hibernate.orm.build.java-module' + +apply plugin: 'org.checkerframework' apply plugin: 'checkstyle' apply plugin: 'build-dashboard' @@ -57,6 +59,15 @@ if ( !project.description ) { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Configurations and Dependencies +configurations.configureEach { + resolutionStrategy.eachDependency { details -> + //Force the "byte buddy agent" version to match the Byte Buddy version + // we use, as Mockito might pull in a mismatched version transitively + if (details.requested.group == "net.bytebuddy" && details.requested.name == 'byte-buddy-agent') { + details.useVersion libs.versions.byteBuddy.get() + } + } +} dependencies { implementation libs.logging @@ -89,10 +100,8 @@ dependencies { testRuntimeOnly 
dbLibs.mssql testRuntimeOnly dbLibs.informix testRuntimeOnly dbLibs.cockroachdb - testRuntimeOnly dbLibs.oracle - testRuntimeOnly dbLibs.oracleXml - testRuntimeOnly dbLibs.oracleXmlParser testRuntimeOnly dbLibs.sybase + testRuntimeOnly rootProject.fileTree(dir: 'drivers', include: '*.jar') // Since both the DB2 driver and HANA have a package "net.jpountz" we have to add dependencies conditionally // This is due to the "no split-packages" requirement of Java 9+ @@ -112,6 +121,11 @@ dependencies { else if ( db.startsWith( 'firebird' ) ) { testRuntimeOnly dbLibs.firebird } + else if ( db.startsWith( 'oracle' ) ) { + testRuntimeOnly dbLibs.oracle + testRuntimeOnly dbLibs.oracleXml + testRuntimeOnly dbLibs.oracleXmlParser + } annotationProcessor libs.loggingProcessor annotationProcessor libs.logging @@ -150,64 +164,15 @@ artifacts { tasks.withType( JavaCompile ) { options.encoding = 'UTF-8' options.warnings false + options.fork = true + options.forkOptions.memoryMaximumSize = '768m' + // javaCompileTask.options.compilerArgs += [ // "-nowarn", // "-encoding", "UTF-8" // ] } -if ( !gradle.ext.javaToolchainEnabled ) { - tasks.compileJava.configure { - sourceCompatibility = JavaVersion.toVersion( gradle.ext.javaVersions.main.release ) - targetCompatibility = JavaVersion.toVersion( gradle.ext.javaVersions.main.release ) - } - tasks.compileTestJava.configure { - sourceCompatibility = JavaVersion.toVersion( gradle.ext.javaVersions.test.release ) - targetCompatibility = JavaVersion.toVersion( gradle.ext.javaVersions.test.release ) - } -} -else { - // Configure generated bytecode - // "sourceCompatibility" is not supported with toolchains. We have to work around that limitation. 
- tasks.compileJava.configure { - options.release = gradle.ext.javaVersions.main.release.asInt() - // Needs add-opens because of https://github.com/gradle/gradle/issues/15538 - options.forkOptions.jvmArgs.addAll( ["--add-opens", "jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED"] ) - } - tasks.compileTestJava.configure { - options.release = gradle.ext.javaVersions.test.release.asInt() - // Needs add-opens because of https://github.com/gradle/gradle/issues/15538 - options.forkOptions.jvmArgs.addAll( ["--add-opens", "jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED"] ) - } - - // Configure version of Java tools - java { - toolchain { - languageVersion = gradle.ext.javaVersions.main.compiler - } - } - tasks.compileTestJava { - javaCompiler = javaToolchains.compilerFor { - languageVersion = gradle.ext.javaVersions.test.compiler - } - } - - // Configure JVM Options - // Display version of Java tools - tasks.withType( JavaCompile ).configureEach { - options.forkOptions.jvmArgs.addAll( getProperty( 'toolchain.compiler.jvmargs' ).toString().split( ' ' ) ) - doFirst { - logger.lifecycle "Compiling with '${javaCompiler.get().metadata.installationPath}'" - } - } - tasks.withType( Javadoc ).configureEach { - options.setJFlags( getProperty( 'toolchain.javadoc.jvmargs' ).toString().split( ' ' ).toList().findAll( { !it.isEmpty() } ) ) - doFirst { - logger.lifecycle "Generating javadoc with '${javadocTool.get().metadata.installationPath}'" - } - } -} - task compile(dependsOn: [compileJava, processResources, compileTestJava, processTestResources] ) @@ -227,26 +192,6 @@ if ( toolsJar.exists() ) { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Testing - -if ( gradle.ext.javaToolchainEnabled ) { - tasks.test { - // Configure version of Java tools - javaLauncher = javaToolchains.launcherFor { - languageVersion = gradle.ext.javaVersions.test.launcher - } - - // Configure JVM Options - jvmArgs( getProperty( 'toolchain.launcher.jvmargs' 
).toString().split( ' ' ) ) - if ( project.hasProperty( 'test.jdk.launcher.args' ) ) { - jvmArgs( project.getProperty( 'test.jdk.launcher.args' ).toString().split( ' ' ) ) - } - - // Display version of Java tools - doFirst { - logger.lifecycle "Testing with '${javaLauncher.get().metadata.installationPath}'" - } - } -} class HeapDumpPathProvider implements CommandLineArgumentProvider { @OutputDirectory Provider path @@ -277,6 +222,7 @@ tasks.withType( Test.class ).each { test -> test.maxHeapSize = '3G' test.systemProperties['hibernate.test.validatefailureexpected'] = true + test.systemProperties['hibernate.highlight_sql'] = false test.systemProperties += System.properties.findAll { it.key.startsWith( "hibernate." ) } test.enableAssertions = true @@ -351,15 +297,15 @@ test { jvmArgs '-XX:+StartAttachListener' } -// Enable the experimental features of ByteBuddy with JDK 19+ +// Enable the experimental features of ByteBuddy with JDK 22+ test { // We need to test the *launcher* version, // because some tests will use Mockito (and thus Bytebuddy) to mock/spy // classes that are part of the JDK, // and those classes always have bytecode matching the version of the launcher. - // So for example, when using a JDK19 launcher and compiling tests with --release 17, - // Bytebuddy will still encounter classes with Java 19 bytecode. - if ( gradle.ext.javaVersions.test.launcher.asInt() >= 19 ) { + // So for example, when using a JDK22 launcher and compiling tests with --release 21, + // Bytebuddy will still encounter classes with Java 22 bytecode. + if ( jdkVersions.test.launcher.asInt() >= 22 ) { logger.warn( "The version of Java bytecode that will be tested is not supported by Bytebuddy by default. " + " Setting 'net.bytebuddy.experimental=true'." ) systemProperty 'net.bytebuddy.experimental', true @@ -372,6 +318,7 @@ test { // Used in the Travis build so that Travis doesn't end up panicking because there's no output for a long time. 
testLogging { events "passed", "skipped", "failed" + exceptionFormat = 'full' } } } @@ -451,7 +398,12 @@ task sourcesJar(type: Jar) { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Javadoc -apply from: rootProject.file( 'gradle/javadoc.gradle' ) +tasks.named( "javadoc", Javadoc ) { + configure( options ) { + windowTitle = "Hibernate Javadocs ($project.name)" + docTitle = "Hibernate Javadocs ($project.name : $project.version)" + } +} // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // IDE @@ -502,6 +454,9 @@ checkstyle { // exclude generated java sources - by explicitly setting the base source dir tasks.checkstyleMain.source = 'src/main/java' +tasks.checkstyleMain + .exclude('org/hibernate/jpamodelgen/util/NullnessUtil.java') + .exclude('org/hibernate/internal/util/NullnessUtil.java') // define a second checkstyle task for checking non-fatal violations task nonFatalCheckstyle(type:Checkstyle) { @@ -511,6 +466,17 @@ task nonFatalCheckstyle(type:Checkstyle) { configFile = rootProject.file( 'shared/config/checkstyle/checkstyle-non-fatal.xml' ) } +checkerFramework { + checkers = [ + 'org.checkerframework.checker.nullness.NullnessChecker' + ] + extraJavacArgs = [ + '-AsuppressWarnings=initialization', + "-Astubs=${project.rootDir}/checkerstubs", + '-AonlyDefs=^org\\.hibernate\\.(jpamodelgen|spi|pretty|(action|context|bytecode)\\.spi)\\.' 
+ ] +} + task forbiddenApisSystemOut(type: CheckForbiddenApis, dependsOn: compileJava) { bundledSignatures += 'jdk-system-out' @@ -518,7 +484,7 @@ task forbiddenApisSystemOut(type: CheckForbiddenApis, dependsOn: compileJava) { } task forbiddenApisUnsafe(type: CheckForbiddenApis, dependsOn: compileJava) { - bundledSignatures += "jdk-unsafe-${gradle.ext.baselineJavaVersion}".toString() + bundledSignatures += "jdk-unsafe-${jdkVersions.baseline}".toString() // unfortunately we currently have many uses of default Locale implicitly (~370) which need to be fixed // before we can fully enabled this check diff --git a/gradle/javadoc.gradle b/gradle/javadoc.gradle index c1257077d0b6..7c04b14386bf 100644 --- a/gradle/javadoc.gradle +++ b/gradle/javadoc.gradle @@ -5,16 +5,20 @@ * See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html */ -// make sure Java plugin is applied -apply plugin : 'java' - apply from: rootProject.file( 'gradle/base-information.gradle' ) -javadoc { - exclude( "**/internal/*" ) - exclude( "**/generated-src/**" ) +tasks.named( "javadoc", Javadoc ) { + def currentYear = new GregorianCalendar().get( Calendar.YEAR ) + + inputs.property "ormVersion", project.ormVersion + inputs.property "currentYear", currentYear + + // exclude any generated sources and internal packages + exclude '**/generated-src/**' + exclude '**/internal/**' + include '**/*.java' - final int currentYear = new GregorianCalendar().get( Calendar.YEAR ) + maxMemory = '512m' configure( options ) { // this is the config needed to use asciidoclet for Javadoc rendering. 
It relies on a build from John's PR @ https://github.com/asciidoctor/asciidoclet/pull/91 @@ -25,25 +29,30 @@ javadoc { // Travis CI JDK 11 build did not like this // docletpath = configurations.asciidoclet.files.asType(List) // doclet = 'org.asciidoctor.Asciidoclet' - windowTitle = "$project.name JavaDocs" - docTitle = "$project.name JavaDocs ($project.version)" - bottom = "Copyright © 2001-$currentYear Red Hat, Inc. All Rights Reserved." + use = true encoding = 'UTF-8' - links += [ - 'https://docs.oracle.com/en/java/javase/11/docs/api/', - 'https://jakarta.ee/specifications/platform/9/apidocs/' - ] - tags = [ "apiNote", 'implSpec', 'implNote', 'todo' ] - addStringOption( 'Xdoclint:none', '-quiet' ) + stylesheetFile = rootProject.file( "shared/javadoc/stylesheet.css" ) + bottom = "Copyright © 2001-$currentYear Red Hat, Inc. All Rights Reserved." + + // The javadoc folder contains cached versions of the respective element-list files to avoid release issues when servers are down + // When upgrading versions of the libraries, don't forget to update the file contents in the repository + linksOffline 'https://docs.oracle.com/en/java/javase/11/docs/api/', "${project.rootDir}/javadoc/javase11" + linksOffline 'https://jakarta.ee/specifications/bean-validation/3.0/apidocs/', "${project.rootDir}/javadoc/jakarta-validation-3.0" + linksOffline 'https://jakarta.ee/specifications/cdi/4.0/apidocs/', "${project.rootDir}/javadoc/jakarta-cdi-4.0" + linksOffline 'https://jakarta.ee/specifications/platform/9/apidocs/', "${project.rootDir}/javadoc/jakarta-platform-9" + linksOffline 'https://www.javadoc.io/doc/javax.cache/cache-api/1.0.0/', "${project.rootDir}/javadoc/javax-cache-1.0" tags( - 'todo:X"', - 'apiNote:a:"API Note:"', - 'implSpec:a:"Implementation Specification:"', - 'implNote:a:"Implementation Note:"' + 'todo:X', + 'apiNote:a:API Note:', + 'implSpec:a:Implementation Specification:', + 'implNote:a:Implementation Note:', + 'settingDefault:f:Default Value:' ) + + 
addStringOption( 'Xdoclint:none', '-quiet' ) } } diff --git a/gradle/libraries.gradle b/gradle/libraries.gradle deleted file mode 100644 index 3822afe76770..000000000000 --- a/gradle/libraries.gradle +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. - * See the lgpl.txt file in the root directory or . - */ - -configurations.all { - resolutionStrategy.eachDependency { details -> - //Force the "byte buddy agent" version to match the Byte Buddy version - // we use, as Mockito might pull in a mismatched version transitively - if (details.requested.group == "net.bytebuddy" && details.requested.name == 'byte-buddy-agent') { - details.useVersion libs.versions.byteBuddy.get() - } - } -} diff --git a/gradle/published-java-module.gradle b/gradle/published-java-module.gradle index b9ea71617b22..73e41b780f83 100644 --- a/gradle/published-java-module.gradle +++ b/gradle/published-java-module.gradle @@ -9,8 +9,6 @@ apply from: rootProject.file( 'gradle/releasable.gradle' ) apply from: rootProject.file( 'gradle/java-module.gradle' ) apply from: rootProject.file( 'gradle/publishing-pom.gradle' ) -apply plugin: 'signing' - // Make sure that the publishReleaseArtifacts task of the release module runs the release task of this sub module tasks.getByPath( ':release:publishReleaseArtifacts' ).dependsOn tasks.release @@ -29,6 +27,8 @@ dependencies { // Publishing java { + // Configure the Java "software component" to include javadoc and sources jars in addition to the classes jar. + // Ultimately, this component is what makes up the publication for this project. 
withJavadocJar() withSourcesJar() } @@ -97,161 +97,52 @@ publishing { } } - -var signingKey = resolveSigningKey() -var signingPassword = findSigningProperty( "signingPassword" ) - -signing { - useInMemoryPgpKeys( signingKey, signingPassword ) - - sign publishing.publications.publishedArtifacts -} - -String resolveSigningKey() { - var key = findSigningProperty( "signingKey" ) - if ( key != null ) { - return key - } - - var keyFile = findSigningProperty( "signingKeyFile" ) - if ( keyFile != null ) { - return new File( keyFile ).text - } - - return null -} - -String findSigningProperty(String propName) { - if ( System.getProperty( propName ) != null ) { - logger.debug "Found `{}` as a system property", propName - return System.getProperty(propName ) - } - else if ( System.getenv().get( propName ) != null ) { - logger.debug "Found `{}` as an env-var property", propName - return System.getenv().get( propName ) - } - else if ( project.hasProperty( propName ) ) { - logger.debug "Found `{}` as a project property", propName - return project.hasProperty( propName ) - } - else { - logger.debug "Did not find `{}`", propName - return null - } -} - - -var signingTask = project.tasks.getByName( "signPublishedArtifactsPublication" ) as Sign -var signingExtension = project.getExtensions().getByType(SigningExtension) as SigningExtension - -task sign { - dependsOn "signPublications" -} - -task signPublications { t -> - tasks.withType( Sign ).all { s -> - t.dependsOn s - } -} - -signingTask.doFirst { - if ( signingKey == null || signingPassword == null ) { - throw new GradleException( - "Cannot perform signing without GPG details. 
Please set the `signingKey` and `signingKeyFile` properties" - ) - } -} - - -boolean wasSigningExplicitlyRequested() { - // check whether signing task was explicitly requested when running the build - // - // NOTE: due to https://discuss.gradle.org/t/how-to-tell-if-a-task-was-explicitly-asked-for-on-the-command-line/42853/3 - // we cannot definitively know whether the task was requested. Gradle really just does not expose this information. - // so we make a convention - we check the "start parameters" object to see which task-names were requested; - // the problem is that these are the raw names directly from the command line. e.g. it is perfectly legal to - // say `gradlew signPubArtPub` in place of `gradlew signPublishedArtifactsPublication` - Gradle will simply - // "expand" the name it finds. However, it does not make that available. - // - // so the convention is that we will check for the following task names - // - // for each of: - // 1. `sign` - // 2. `signPublications` - // 3. `signPublishedArtifactsPublication` - // - // and we check both forms: - // 1. "${taskName}" - // 2. 
project.path + ":${taskName}" - // - // we need to check both again because of the "start parameters" discussion - - def signingTaskNames = ["sign", "signPublications", "signPublishedArtifactsPublication"] - - for ( String taskName : signingTaskNames ) { - if ( gradle.startParameter.taskNames.contains( taskName ) - || gradle.startParameter.taskNames.contains( "${project.path}:${taskName}" ) ) { - return true +tasks.withType(PublishToMavenLocal).configureEach { + doFirst { + logger.lifecycle("PublishToMavenLocal ({})", publication.name) + logger.lifecycle(" - {} : {} : {} ", publication.groupId, publication.artifactId, publication.pom.packaging) + logger.lifecycle(" - artifacts ({})...", publication.artifacts.size()) + publication.artifacts.forEach { + logger.lifecycle(" - artifact ({}) : {} ({})" , it.classifier, it.file, it.file.size()) } } - - return false } -if ( wasSigningExplicitlyRequested() ) { - // signing was explicitly requested - signingExtension.required = true -} -else { - gradle.taskGraph.whenReady { graph -> - if ( graph.hasTask( signingTask ) ) { - // signing is scheduled to happen. - // - // we know, from above if-check, that it was not explicitly requested - - // so it is triggered via task dependency. make sure we want it to happen - var publishingTask = project.tasks.getByName( "publishPublishedArtifactsPublicationToSonatypeRepository" ) as PublishToMavenRepository - if ( graph.hasTask( publishingTask ) ) { - // we are publishing to Sonatype OSSRH - we need the signing to happen - signingExtension.required = true - } - else { - // signing was not explicitly requested and we are not publishing to OSSRH, - // so do not sign. 
- signingTask.enabled = false - } +tasks.withType(PublishToMavenRepository).configureEach { + doFirst { + logger.lifecycle("PublishToMavenRepository ({} : {})", publication.name, repository.name) + logger.lifecycle(" - {} : {} : {} ", publication.groupId, publication.artifactId, publication.pom.packaging) + logger.lifecycle(" - artifacts ({})...", publication.artifacts.size()) + publication.artifacts.forEach { + logger.lifecycle(" - artifact ({}) : {} ({})" , it.classifier, it.file, it.file.size()) } - } } - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Release / publishing tasks -task ciBuild { - dependsOn test, tasks.publishToSonatype +tasks.register('ciBuild') { + dependsOn test } -tasks.release.dependsOn tasks.test, tasks.publishToSonatype +tasks.release.dependsOn tasks.test tasks.preVerifyRelease.dependsOn build tasks.preVerifyRelease.dependsOn generateMetadataFileForPublishedArtifactsPublication tasks.preVerifyRelease.dependsOn generatePomFileForPublishedArtifactsPublication tasks.preVerifyRelease.dependsOn generatePomFileForRelocationPomPublication -tasks.publishToSonatype.mustRunAfter test - - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Ancillary tasks -task showPublications { +tasks.register('showPublications') { doFirst { - project.publishing.publications.each { publication -> + publishing.publications.each { publication -> println "Publication (${publication.name}): ${publication.groupId}:${publication.artifactId}:${publication.version}" publication.artifacts.each { artifact -> println " > ${artifact}" } } } -} \ No newline at end of file +} diff --git a/gradle/publishing-pom.gradle b/gradle/publishing-pom.gradle index 4654f0d873e1..cb8bfa7f72b5 100644 --- a/gradle/publishing-pom.gradle +++ b/gradle/publishing-pom.gradle @@ -59,5 +59,18 @@ publishing { } } + repositories { + maven { + name = "staging" + url = 
rootProject.layout.buildDirectory.dir("staging-deploy${File.separator}maven") + } + maven { + name = 'snapshots' + url = "https://central.sonatype.com/repository/maven-snapshots/" + // So that Gradle uses the `ORG_GRADLE_PROJECT_snapshotsPassword` / `ORG_GRADLE_PROJECT_snapshotsUsername` + // env variables to read the username/password for the `snapshots` repository publishing: + credentials(PasswordCredentials) + } + } } diff --git a/gradle/releasable.gradle b/gradle/releasable.gradle index fdcb92c20220..eb5052d02db7 100644 --- a/gradle/releasable.gradle +++ b/gradle/releasable.gradle @@ -1,7 +1,6 @@ apply from: rootProject.file( 'gradle/base-information.gradle' ) task release { - dependsOn ':release:releaseChecks' mustRunAfter ':release:releaseChecks' enabled !project.ormVersion.isSnapshot } diff --git a/gradle/version.properties b/gradle/version.properties index f0e51cefccd4..fb2a3b9918d1 100644 --- a/gradle/version.properties +++ b/gradle/version.properties @@ -1 +1 @@ -hibernateVersion=6.2.0-SNAPSHOT \ No newline at end of file +hibernateVersion=6.2.44-SNAPSHOT \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 7454180f2ae8..7f93135c49b7 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 2e6e5897b528..3fa8f862f753 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-bin.zip +networkTimeout=10000 +validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index 1b6c787337ff..0adc8e1a5321 100755 --- a/gradlew +++ 
b/gradlew @@ -55,7 +55,7 @@ # Darwin, MinGW, and NonStop. # # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. @@ -80,13 +80,11 @@ do esac done -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit - -APP_NAME="Gradle" +# This is normally unused +# shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum @@ -133,22 +131,29 @@ location of your Java installation." fi else JAVACMD=java - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." + fi fi # Increase the maximum file descriptors if we can. if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac case $MAX_FD in #( '' | soft) :;; #( *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. 
+ # shellcheck disable=SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -193,6 +198,10 @@ if "$cygwin" || "$msys" ; then done fi + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + # Collect all arguments for the java command; # * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of # shell script including quotes and variable substitutions, so put them in @@ -205,6 +214,12 @@ set -- \ org.gradle.wrapper.GradleWrapperMain \ "$@" +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + # Use "xargs" to parse quoted args. # # With -n1 it outputs one arg per line, with the quotes and backslashes removed. diff --git a/gradlew.bat b/gradlew.bat index ac1b06f93825..6689b85beecd 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -14,7 +14,7 @@ @rem limitations under the License. @rem -@if "%DEBUG%" == "" @echo off +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -25,7 +25,8 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @@ -40,7 +41,7 @@ if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute +if %ERRORLEVEL% equ 0 goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 
@@ -75,13 +76,15 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal diff --git a/hibernate-agroal/src/test/java/org/hibernate/test/agroal/AgroalSkipAutoCommitTest.java b/hibernate-agroal/src/test/java/org/hibernate/test/agroal/AgroalSkipAutoCommitTest.java index 44df1a49f9c1..de5209d48172 100644 --- a/hibernate-agroal/src/test/java/org/hibernate/test/agroal/AgroalSkipAutoCommitTest.java +++ b/hibernate-agroal/src/test/java/org/hibernate/test/agroal/AgroalSkipAutoCommitTest.java @@ -9,28 +9,21 @@ import org.hibernate.cfg.AvailableSettings; import org.hibernate.cfg.Configuration; import org.hibernate.test.agroal.util.PreparedStatementSpyConnectionProvider; -import org.hibernate.testing.DialectChecks; -import org.hibernate.testing.RequiresDialectFeature; import org.hibernate.testing.junit4.BaseCoreFunctionalTestCase; import org.junit.Test; import jakarta.persistence.Entity; import jakarta.persistence.Id; import java.sql.Connection; -import java.sql.SQLException; import java.util.List; import static org.hibernate.testing.transaction.TransactionUtil.doInHibernate; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; /** * @author Vlad Mihalcea */ -@RequiresDialectFeature(DialectChecks.SupportsJdbcDriverProxying.class) public class AgroalSkipAutoCommitTest extends BaseCoreFunctionalTestCase { private 
PreparedStatementSpyConnectionProvider connectionProvider = new PreparedStatementSpyConnectionProvider(); @@ -74,12 +67,15 @@ private void verifyConnections() { List connections = connectionProvider.getReleasedConnections(); assertEquals( 1, connections.size() ); - Connection connection = connections.get( 0 ); try { - verify(connection, never()).setAutoCommit( false ); + List setAutoCommitCalls = connectionProvider.spyContext.getCalls( + Connection.class.getMethod( "setAutoCommit", boolean.class ), + connections.get( 0 ) + ); + assertTrue( "setAutoCommit should never be called", setAutoCommitCalls.isEmpty() ); } - catch (SQLException e) { - fail(e.getMessage()); + catch (NoSuchMethodException e) { + throw new RuntimeException( e ); } } diff --git a/hibernate-agroal/src/test/java/org/hibernate/test/agroal/util/PreparedStatementSpyConnectionProvider.java b/hibernate-agroal/src/test/java/org/hibernate/test/agroal/util/PreparedStatementSpyConnectionProvider.java index 1f9025f42a44..5c86333f8d38 100644 --- a/hibernate-agroal/src/test/java/org/hibernate/test/agroal/util/PreparedStatementSpyConnectionProvider.java +++ b/hibernate-agroal/src/test/java/org/hibernate/test/agroal/util/PreparedStatementSpyConnectionProvider.java @@ -9,18 +9,13 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; -import java.sql.Statement; import java.util.ArrayList; -import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; import org.hibernate.agroal.internal.AgroalConnectionProvider; import org.hibernate.engine.jdbc.connections.spi.ConnectionProvider; -import org.mockito.ArgumentMatchers; -import org.mockito.Mockito; -import org.mockito.internal.util.MockUtil; +import org.hibernate.testing.jdbc.JdbcSpies; /** * This {@link ConnectionProvider} extends any other ConnectionProvider that would be used by default taken the current configuration properties, and it @@ -29,8 +24,7 @@ * @author Vlad Mihalcea */ public class 
PreparedStatementSpyConnectionProvider extends AgroalConnectionProvider { - - private final Map preparedStatementMap = new LinkedHashMap<>(); + public final JdbcSpies.SpyContext spyContext = new JdbcSpies.SpyContext(); private final List acquiredConnections = new ArrayList<>( ); private final List releasedConnections = new ArrayList<>( ); @@ -53,7 +47,7 @@ public Connection getConnection() throws SQLException { public void closeConnection(Connection conn) throws SQLException { acquiredConnections.remove( conn ); releasedConnections.add( conn ); - super.closeConnection( (Connection) MockUtil.getMockSettings( conn ).getSpiedInstance() ); + super.closeConnection( spyContext.getSpiedInstance( conn ) ); } @Override @@ -63,29 +57,7 @@ public void stop() { } private Connection spy(Connection connection) { - if ( MockUtil.isMock( connection ) ) { - return connection; - } - Connection connectionSpy = Mockito.spy( connection ); - try { - Mockito.doAnswer( invocation -> { - PreparedStatement statement = (PreparedStatement) invocation.callRealMethod(); - PreparedStatement statementSpy = Mockito.spy( statement ); - String sql = (String) invocation.getArguments()[0]; - preparedStatementMap.put( statementSpy, sql ); - return statementSpy; - } ).when( connectionSpy ).prepareStatement( ArgumentMatchers.anyString() ); - - Mockito.doAnswer( invocation -> { - Statement statement = (Statement) invocation.callRealMethod(); - Statement statementSpy = Mockito.spy( statement ); - return statementSpy; - } ).when( connectionSpy ).createStatement(); - } - catch ( SQLException e ) { - throw new IllegalArgumentException( e ); - } - return connectionSpy; + return JdbcSpies.spy( connection, spyContext ); } /** @@ -94,8 +66,7 @@ private Connection spy(Connection connection) { public void clear() { acquiredConnections.clear(); releasedConnections.clear(); - preparedStatementMap.keySet().forEach( Mockito::reset ); - preparedStatementMap.clear(); + spyContext.clear(); } /** diff --git 
a/hibernate-agroal/src/test/resources/hibernate.properties b/hibernate-agroal/src/test/resources/hibernate.properties index da8399b8675f..2ce40bcd75ed 100644 --- a/hibernate-agroal/src/test/resources/hibernate.properties +++ b/hibernate-agroal/src/test/resources/hibernate.properties @@ -16,4 +16,4 @@ hibernate.connection.provider_class AgroalConnectionProvider hibernate.agroal.maxSize 2 hibernate.agroal.acquisitionTimeout PT1s -hibernate.agroal.reapTimeout PT10s \ No newline at end of file +hibernate.agroal.reapTimeout PT10s diff --git a/hibernate-c3p0/hibernate-c3p0.gradle b/hibernate-c3p0/hibernate-c3p0.gradle index 9e68c8a25bbe..b5bce3e18853 100644 --- a/hibernate-c3p0/hibernate-c3p0.gradle +++ b/hibernate-c3p0/hibernate-c3p0.gradle @@ -16,9 +16,5 @@ dependencies { testImplementation project( ':hibernate-testing' ) testImplementation libs.validator testRuntimeOnly jakartaLibs.el - - if ( db.equalsIgnoreCase( 'oracle' ) ) { - testRuntimeOnly dbLibs.oracle - } } diff --git a/hibernate-c3p0/src/main/java/org/hibernate/c3p0/internal/C3P0ConnectionProvider.java b/hibernate-c3p0/src/main/java/org/hibernate/c3p0/internal/C3P0ConnectionProvider.java index a08ca889f224..8035bd2718ab 100644 --- a/hibernate-c3p0/src/main/java/org/hibernate/c3p0/internal/C3P0ConnectionProvider.java +++ b/hibernate-c3p0/src/main/java/org/hibernate/c3p0/internal/C3P0ConnectionProvider.java @@ -18,7 +18,8 @@ import org.hibernate.HibernateException; import org.hibernate.boot.registry.classloading.spi.ClassLoaderService; import org.hibernate.boot.registry.classloading.spi.ClassLoadingException; -import org.hibernate.cfg.Environment; +import org.hibernate.cfg.C3p0Settings; +import org.hibernate.cfg.JdbcSettings; import org.hibernate.engine.jdbc.connections.internal.ConnectionProviderInitiator; import org.hibernate.engine.jdbc.connections.spi.ConnectionProvider; import org.hibernate.internal.util.PropertiesHelper; @@ -31,6 +32,7 @@ import static 
org.hibernate.c3p0.internal.C3P0MessageLogger.C3P0_LOGGER; import static org.hibernate.c3p0.internal.C3P0MessageLogger.C3P0_MSG_LOGGER; +import static org.hibernate.engine.jdbc.connections.internal.ConnectionProviderInitiator.extractSetting; /** * A connection provider that uses a C3P0 connection pool. Hibernate will use this by @@ -41,6 +43,7 @@ */ public class C3P0ConnectionProvider implements ConnectionProvider, Configurable, Stoppable, ServiceRegistryAwareService { + private static volatile String HIBERNATE_STYLE_SETTING_PREFIX = C3p0Settings.C3P0_CONFIG_PREFIX + "."; //swaldman 2006-08-28: define c3p0-style configuration parameters for properties with // hibernate-specific overrides to detect and warn about conflicting @@ -104,18 +107,28 @@ else if ( DataSource.class.isAssignableFrom( unwrapType ) ) { @Override public void configure(Map props) { - final String jdbcDriverClass = (String) props.get( Environment.DRIVER ); - final String jdbcUrl = (String) props.get( Environment.URL ); + final String jdbcDriverClass = extractSetting( + props, + JdbcSettings.JAKARTA_JDBC_DRIVER, + JdbcSettings.DRIVER, + JdbcSettings.JPA_JDBC_DRIVER + ); + final String jdbcUrl = extractSetting( + props, + JdbcSettings.JAKARTA_JDBC_URL, + JdbcSettings.URL, + JdbcSettings.JPA_JDBC_URL + ); final Properties connectionProps = ConnectionProviderInitiator.getConnectionProperties( props ); C3P0_MSG_LOGGER.c3p0UsingDriver( jdbcDriverClass, jdbcUrl ); C3P0_MSG_LOGGER.connectionProperties( ConfigurationHelper.maskOut( connectionProps, "password" ) ); - autocommit = ConfigurationHelper.getBoolean( Environment.AUTOCOMMIT, props ); + autocommit = ConfigurationHelper.getBoolean( JdbcSettings.AUTOCOMMIT, props ); C3P0_MSG_LOGGER.autoCommitMode( autocommit ); if ( jdbcDriverClass == null ) { - C3P0_MSG_LOGGER.jdbcDriverNotSpecified( Environment.DRIVER ); + C3P0_MSG_LOGGER.jdbcDriverNotSpecified(); } else { try { @@ -129,20 +142,20 @@ public void configure(Map props) { try { //swaldman 2004-02-07: 
modify to allow null values to signify fall through to c3p0 PoolConfig defaults - final Integer minPoolSize = ConfigurationHelper.getInteger( Environment.C3P0_MIN_SIZE, props ); - final Integer maxPoolSize = ConfigurationHelper.getInteger( Environment.C3P0_MAX_SIZE, props ); - final Integer maxIdleTime = ConfigurationHelper.getInteger( Environment.C3P0_TIMEOUT, props ); - final Integer maxStatements = ConfigurationHelper.getInteger( Environment.C3P0_MAX_STATEMENTS, props ); - final Integer acquireIncrement = ConfigurationHelper.getInteger( Environment.C3P0_ACQUIRE_INCREMENT, props ); - final Integer idleTestPeriod = ConfigurationHelper.getInteger( Environment.C3P0_IDLE_TEST_PERIOD, props ); + final Integer minPoolSize = ConfigurationHelper.getInteger( C3p0Settings.C3P0_MIN_SIZE, props ); + final Integer maxPoolSize = ConfigurationHelper.getInteger( C3p0Settings.C3P0_MAX_SIZE, props ); + final Integer maxIdleTime = ConfigurationHelper.getInteger( C3p0Settings.C3P0_TIMEOUT, props ); + final Integer maxStatements = ConfigurationHelper.getInteger( C3p0Settings.C3P0_MAX_STATEMENTS, props ); + final Integer acquireIncrement = ConfigurationHelper.getInteger( C3p0Settings.C3P0_ACQUIRE_INCREMENT, props ); + final Integer idleTestPeriod = ConfigurationHelper.getInteger( C3p0Settings.C3P0_IDLE_TEST_PERIOD, props ); final Properties c3props = new Properties(); // turn hibernate.c3p0.* into c3p0.*, so c3p0 // gets a chance to see all hibernate.c3p0.* for ( String key : props.keySet() ) { - if ( key.startsWith( "hibernate.c3p0." 
) ) { - final String newKey = key.substring( 15 ); + if ( key.startsWith( HIBERNATE_STYLE_SETTING_PREFIX ) ) { + final String newKey = key.substring( HIBERNATE_STYLE_SETTING_PREFIX.length() ); if ( props.containsKey( newKey ) ) { warnPropertyConflict( key, newKey ); } @@ -150,17 +163,13 @@ public void configure(Map props) { } } - setOverwriteProperty( Environment.C3P0_MIN_SIZE, C3P0_STYLE_MIN_POOL_SIZE, props, c3props, minPoolSize ); - setOverwriteProperty( Environment.C3P0_MAX_SIZE, C3P0_STYLE_MAX_POOL_SIZE, props, c3props, maxPoolSize ); - setOverwriteProperty( Environment.C3P0_TIMEOUT, C3P0_STYLE_MAX_IDLE_TIME, props, c3props, maxIdleTime ); + setOverwriteProperty( C3p0Settings.C3P0_MIN_SIZE, C3P0_STYLE_MIN_POOL_SIZE, props, c3props, minPoolSize ); + setOverwriteProperty( C3p0Settings.C3P0_MAX_SIZE, C3P0_STYLE_MAX_POOL_SIZE, props, c3props, maxPoolSize ); + setOverwriteProperty( C3p0Settings.C3P0_TIMEOUT, C3P0_STYLE_MAX_IDLE_TIME, props, c3props, maxIdleTime ); + setOverwriteProperty( C3p0Settings.C3P0_MAX_STATEMENTS, C3P0_STYLE_MAX_STATEMENTS, props, c3props, maxStatements ); + setOverwriteProperty( C3p0Settings.C3P0_ACQUIRE_INCREMENT, C3P0_STYLE_ACQUIRE_INCREMENT, props, c3props, acquireIncrement ); setOverwriteProperty( - Environment.C3P0_MAX_STATEMENTS, C3P0_STYLE_MAX_STATEMENTS, props, c3props, maxStatements - ); - setOverwriteProperty( - Environment.C3P0_ACQUIRE_INCREMENT, C3P0_STYLE_ACQUIRE_INCREMENT, props, c3props, acquireIncrement - ); - setOverwriteProperty( - Environment.C3P0_IDLE_TEST_PERIOD, + C3p0Settings.C3P0_IDLE_TEST_PERIOD, C3P0_STYLE_IDLE_CONNECTION_TEST_PERIOD, props, c3props, diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CUBRIDDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CUBRIDDialect.java index 9dc7a943dfab..d73897a57aa6 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CUBRIDDialect.java +++ 
b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CUBRIDDialect.java @@ -54,6 +54,7 @@ import static org.hibernate.type.SqlTypes.BOOLEAN; import static org.hibernate.type.SqlTypes.TIMESTAMP; import static org.hibernate.type.SqlTypes.TIMESTAMP_WITH_TIMEZONE; +import static org.hibernate.type.SqlTypes.TIME_WITH_TIMEZONE; import static org.hibernate.type.SqlTypes.TINYINT; import static org.hibernate.type.SqlTypes.VARBINARY; @@ -83,6 +84,7 @@ protected String columnType(int sqlTypeCode) { //(always 3, millisecond precision) case TIMESTAMP: return "datetime"; + case TIME_WITH_TIMEZONE: case TIMESTAMP_WITH_TIMEZONE: return "datetimetz"; default: @@ -104,7 +106,12 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR //length parameter is measured in bits, not bytes) ddlTypeRegistry.addDescriptor( new DdlTypeImpl( BINARY, "bit($l)", this ) ); ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( VARBINARY, columnType( BLOB ), this ) + CapacityDependentDdlType.builder( + VARBINARY, + CapacityDependentDdlType.LobKind.BIGGEST_LOB, + columnType( BLOB ), + this + ) .withTypeCapacity( getMaxVarbinaryLength(), "bit varying($l)" ) .build() ); @@ -369,7 +376,7 @@ public LimitHandler getLimitHandler() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new CUBRIDIdentityColumnSupport(); + return CUBRIDIdentityColumnSupport.INSTANCE; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CacheDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CacheDialect.java index cb4a0244d783..5db647eaecf9 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CacheDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CacheDialect.java @@ -168,7 +168,7 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio 
functionFactory.weekQuarter(); functionFactory.daynameMonthname(); functionFactory.toCharNumberDateTimestamp(); - functionFactory.truncate(); + functionFactory.trunc_truncate(); functionFactory.dayofweekmonthyear(); functionFactory.repeat_replicate(); functionFactory.datepartDatename(); @@ -295,7 +295,7 @@ public String getNativeIdentifierGeneratorStrategy() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new CacheIdentityColumnSupport(); + return CacheIdentityColumnSupport.INSTANCE; } // SEQUENCE support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacyDialect.java index 979103fb78a7..510c19611f39 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacyDialect.java @@ -30,19 +30,18 @@ import org.hibernate.dialect.DatabaseVersion; import org.hibernate.dialect.Dialect; import org.hibernate.dialect.NationalizationSupport; +import org.hibernate.dialect.PgJdbcHelper; +import org.hibernate.dialect.PostgreSQLCastingInetJdbcType; +import org.hibernate.dialect.PostgreSQLCastingIntervalSecondJdbcType; +import org.hibernate.dialect.PostgreSQLCastingJsonJdbcType; import org.hibernate.dialect.PostgreSQLDriverKind; -import org.hibernate.dialect.PostgreSQLInetJdbcType; -import org.hibernate.dialect.PostgreSQLIntervalSecondJdbcType; -import org.hibernate.dialect.PostgreSQLJsonJdbcType; -import org.hibernate.dialect.PostgreSQLJsonbJdbcType; -import org.hibernate.dialect.PostgreSQLPGObjectJdbcType; import org.hibernate.dialect.RowLockStrategy; import org.hibernate.dialect.SimpleDatabaseVersion; import org.hibernate.dialect.SpannerDialect; import org.hibernate.dialect.TimeZoneSupport; 
import org.hibernate.dialect.function.CommonFunctionFactory; import org.hibernate.dialect.function.FormatFunction; -import org.hibernate.dialect.function.PostgreSQLTruncRoundFunction; +import org.hibernate.dialect.function.PostgreSQLTruncFunction; import org.hibernate.dialect.identity.CockroachDBIdentityColumnSupport; import org.hibernate.dialect.identity.IdentityColumnSupport; import org.hibernate.dialect.pagination.LimitHandler; @@ -73,7 +72,6 @@ import org.hibernate.sql.exec.spi.JdbcOperation; import org.hibernate.type.JavaObjectType; import org.hibernate.type.descriptor.jdbc.ArrayJdbcType; -import org.hibernate.type.descriptor.jdbc.InstantAsTimestampWithTimeZoneJdbcType; import org.hibernate.type.descriptor.jdbc.JdbcType; import org.hibernate.type.descriptor.jdbc.ObjectNullAsBinaryTypeJdbcType; import org.hibernate.type.descriptor.jdbc.UUIDJdbcType; @@ -105,14 +103,13 @@ public class CockroachLegacyDialect extends Dialect { private static final CoreMessageLogger LOG = Logger.getMessageLogger( CoreMessageLogger.class, CockroachLegacyDialect.class.getName() ); - private static final CockroachDBIdentityColumnSupport IDENTITY_COLUMN_SUPPORT = new CockroachDBIdentityColumnSupport(); // KNOWN LIMITATIONS: // * no support for java.sql.Clob // Pre-compile and reuse pattern private static final Pattern CRDB_VERSION_PATTERN = Pattern.compile( "v[\\d]+(\\.[\\d]+)?(\\.[\\d]+)?" 
); - private static final DatabaseVersion DEFAULT_VERSION = DatabaseVersion.make( 19, 2 ); - private final PostgreSQLDriverKind driverKind; + protected static final DatabaseVersion DEFAULT_VERSION = DatabaseVersion.make( 19, 2 ); + protected final PostgreSQLDriverKind driverKind; public CockroachLegacyDialect() { this( DEFAULT_VERSION ); @@ -196,6 +193,10 @@ protected String columnType(int sqlTypeCode) { case BLOB: return "bytes"; + // We do not use the time with timezone type because PG deprecated it and it lacks certain operations like subtraction +// case TIME_UTC: +// return columnType( TIME_WITH_TIMEZONE ); + case TIMESTAMP_UTC: return columnType( TIMESTAMP_WITH_TIMEZONE ); @@ -228,19 +229,17 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR final DdlTypeRegistry ddlTypeRegistry = typeContributions.getTypeConfiguration().getDdlTypeRegistry(); ddlTypeRegistry.addDescriptor( new DdlTypeImpl( UUID, "uuid", this ) ); - if ( PostgreSQLPGObjectJdbcType.isUsable() ) { - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "geometry", this ) ); - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOGRAPHY, "geography", this ) ); - ddlTypeRegistry.addDescriptor( new Scale6IntervalSecondDdlType( this ) ); - - // Prefer jsonb if possible - if ( getVersion().isSameOrAfter( 20 ) ) { - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( INET, "inet", this ) ); - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "jsonb", this ) ); - } - else { - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "json", this ) ); - } + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "geometry", this ) ); + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOGRAPHY, "geography", this ) ); + ddlTypeRegistry.addDescriptor( new Scale6IntervalSecondDdlType( this ) ); + + // Prefer jsonb if possible + if ( getVersion().isSameOrAfter( 20 ) ) { + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( INET, "inet", this ) ); + ddlTypeRegistry.addDescriptor( 
new DdlTypeImpl( JSON, "jsonb", this ) ); + } + else { + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "json", this ) ); } } @@ -272,6 +271,12 @@ public JdbcType resolveSqlTypeDescriptor( break; } break; + case TIME: + // The PostgreSQL JDBC driver reports TIME for timetz, but we use it only for mapping OffsetTime to UTC + if ( "timetz".equals( columnTypeName ) ) { + jdbcTypeCode = TIME_UTC; + } + break; case TIMESTAMP: // The PostgreSQL JDBC driver reports TIMESTAMP for timestamptz, but we use it only for mapping Instant if ( "timestamptz".equals( columnTypeName ) ) { @@ -321,24 +326,49 @@ protected Integer resolveSqlTypeCode(String columnTypeName, TypeConfiguration ty @Override public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { super.contributeTypes( typeContributions, serviceRegistry ); + contributeCockroachTypes( typeContributions, serviceRegistry ); + } + protected void contributeCockroachTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration() .getJdbcTypeRegistry(); - jdbcTypeRegistry.addDescriptor( TIMESTAMP_UTC, InstantAsTimestampWithTimeZoneJdbcType.INSTANCE ); + // Don't use this type due to https://github.com/pgjdbc/pgjdbc/issues/2862 + //jdbcTypeRegistry.addDescriptor( TimestampUtcAsOffsetDateTimeJdbcType.INSTANCE ); if ( driverKind == PostgreSQLDriverKind.PG_JDBC ) { jdbcTypeRegistry.addDescriptorIfAbsent( UUIDJdbcType.INSTANCE ); - if ( PostgreSQLPGObjectJdbcType.isUsable() ) { - jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLIntervalSecondJdbcType.INSTANCE ); + if ( PgJdbcHelper.isUsable( serviceRegistry ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getIntervalJdbcType( serviceRegistry ) ); if ( getVersion().isSameOrAfter( 20, 0 ) ) { - jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLInetJdbcType.INSTANCE ); - jdbcTypeRegistry.addDescriptorIfAbsent( 
PostgreSQLJsonbJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getInetJdbcType( serviceRegistry ) ); + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getJsonbJdbcType( serviceRegistry ) ); } else { - jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLJsonJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getJsonJdbcType( serviceRegistry ) ); + } + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingIntervalSecondJdbcType.INSTANCE ); + if ( getVersion().isSameOrAfter( 20, 0 ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingInetJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingJsonJdbcType.JSONB_INSTANCE ); + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingJsonJdbcType.JSON_INSTANCE ); } } } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( UUIDJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingIntervalSecondJdbcType.INSTANCE ); + if ( getVersion().isSameOrAfter( 20, 0 ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingInetJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingJsonJdbcType.JSONB_INSTANCE ); + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingJsonJdbcType.JSON_INSTANCE ); + } + } // Force Blob binding to byte[] for CockroachDB jdbcTypeRegistry.addDescriptor( Types.BLOB, VarbinaryJdbcType.INSTANCE ); @@ -402,7 +432,13 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionContributions.getFunctionRegistry().register( "format", - new FormatFunction( "experimental_strftime", functionContributions.getTypeConfiguration() ) + new FormatFunction( + "experimental_strftime", + false, + true, + false, + functionContributions.getTypeConfiguration() + ) ); functionFactory.windowFunctions(); functionFactory.listagg_stringAgg( "string" ); @@ -410,7 +446,11 @@ public void 
initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.hypotheticalOrderedSetAggregates_windowEmulation(); functionContributions.getFunctionRegistry().register( - "trunc", new PostgreSQLTruncRoundFunction( "trunc", getVersion().isSameOrAfter( 22, 2 ) ) + "trunc", + new PostgreSQLTruncFunction( + getVersion().isSameOrAfter( 22, 2 ), + functionContributions.getTypeConfiguration() + ) ); functionContributions.getFunctionRegistry().registerAlternateKey( "truncate", "trunc" ); } @@ -472,7 +512,7 @@ public boolean qualifyIndexName() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return IDENTITY_COLUMN_SUPPORT; + return CockroachDBIdentityColumnSupport.INSTANCE; } @Override @@ -732,22 +772,30 @@ public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalT if ( unit == null ) { return "(?3-?2)"; } - switch (unit) { - case YEAR: - return "(extract(year from ?3)-extract(year from ?2))"; - case QUARTER: - return "(extract(year from ?3)*4-extract(year from ?2)*4+extract(month from ?3)//3-extract(month from ?2)//3)"; - case MONTH: - return "(extract(year from ?3)*12-extract(year from ?2)*12+extract(month from ?3)-extract(month from ?2))"; - } - if ( toTemporalType != TemporalType.TIMESTAMP && fromTemporalType != TemporalType.TIMESTAMP ) { + if ( toTemporalType == TemporalType.DATE && fromTemporalType == TemporalType.DATE ) { // special case: subtraction of two dates // results in an integer number of days // instead of an INTERVAL - return "(?3-?2)" + DAY.conversionFactor( unit, this ); + switch ( unit ) { + case YEAR: + case MONTH: + case QUARTER: + // age only supports timestamptz, so we have to cast the date expressions + return "extract(" + translateDurationField( unit ) + " from age(cast(?3 as timestamptz),cast(?2 as timestamptz)))"; + default: + return "(?3-?2)" + DAY.conversionFactor( unit, this ); + } } else { switch (unit) { + case YEAR: + return "extract(year from ?3-?2)"; + case QUARTER: + 
return "(extract(year from ?3-?2)*4+extract(month from ?3-?2)//3)"; + case MONTH: + return "(extract(year from ?3-?2)*12+extract(month from ?3-?2))"; + // Prior to v20, Cockroach didn't support extracting from an interval/duration, + // so we use the extract_duration function case WEEK: return "extract_duration(hour from ?3-?2)/168"; case DAY: diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacySqlAstTranslator.java index adee88464bfb..84048cac6a59 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CockroachLegacySqlAstTranslator.java @@ -15,6 +15,7 @@ import org.hibernate.sql.ast.tree.expression.Literal; import org.hibernate.sql.ast.tree.expression.Summarization; import org.hibernate.sql.ast.tree.predicate.BooleanExpressionPredicate; +import org.hibernate.sql.ast.tree.predicate.InArrayPredicate; import org.hibernate.sql.ast.tree.predicate.LikePredicate; import org.hibernate.sql.ast.tree.select.QueryGroup; import org.hibernate.sql.ast.tree.select.QueryPart; @@ -183,4 +184,12 @@ public void visitLikePredicate(LikePredicate likePredicate) { protected boolean supportsRowValueConstructorSyntaxInQuantifiedPredicates() { return false; } + + @Override + public void visitInArrayPredicate(InArrayPredicate inArrayPredicate) { + inArrayPredicate.getTestExpression().accept( this ); + appendSql( " = ANY(" ); + inArrayPredicate.getArrayParameter().accept( this ); + appendSql( ')' ); + } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CommunityDialectSelector.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CommunityDialectSelector.java index 2025ce79289b..0c7a9167ad66 100644 --- 
a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CommunityDialectSelector.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/CommunityDialectSelector.java @@ -20,12 +20,24 @@ public Class resolve(String name) { return null; } switch ( name ) { + case "DB297": + return DB297Dialect.class; + case "DB2390": + return DB2390Dialect.class; + case "DB2390V8": + return DB2390V8Dialect.class; case "Cache": return CacheDialect.class; case "Cache71": return Cache71Dialect.class; case "CUBRID": return CUBRIDDialect.class; + case "DerbyTenFive": + return DerbyTenFiveDialect.class; + case "DerbyTenSix": + return DerbyTenSixDialect.class; + case "DerbyTenSeven": + return DerbyTenSevenDialect.class; case "Firebird": return FirebirdDialect.class; case "Informix": @@ -38,16 +50,56 @@ public Class resolve(String name) { return Ingres9Dialect.class; case "Ingres10": return Ingres10Dialect.class; + case "MariaDB53": + return MariaDB53Dialect.class; + case "MariaDB10": + return MariaDB10Dialect.class; + case "MariaDB102": + return MariaDB102Dialect.class; case "MimerSQL": return MimerSQLDialect.class; + case "MySQL5": + return MySQL5Dialect.class; + case "MySQL55": + return MySQL55Dialect.class; + case "Oracle8i": + return Oracle8iDialect.class; + case "Oracle9i": + return Oracle9iDialect.class; + case "Oracle10g": + return Oracle10gDialect.class; + case "PostgreSQL81": + return PostgreSQL81Dialect.class; + case "PostgreSQL82": + return PostgreSQL82Dialect.class; + case "PostgreSQL9": + return PostgreSQL9Dialect.class; + case "PostgreSQL91": + return PostgreSQL91Dialect.class; + case "PostgreSQL92": + return PostgreSQL92Dialect.class; + case "PostgreSQL93": + return PostgreSQL93Dialect.class; + case "PostgreSQL94": + return PostgreSQL94Dialect.class; + case "PostgreSQL95": + return PostgreSQL95Dialect.class; case "RDMSOS2200": return RDMSOS2200Dialect.class; case "SAPDB": return SAPDBDialect.class; + case "SQLServer2005": + 
return SQLServer2005Dialect.class; case "MaxDB": return MaxDBDialect.class; + case "Sybase11": + return Sybase11Dialect.class; case "SybaseAnywhere": return SybaseAnywhereDialect.class; + case "SybaseASE15": + return SybaseASE15Dialect.class; + case "SybaseASE157": + return SybaseASE157Dialect.class; case "Teradata": return TeradataDialect.class; case "Teradata14": diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/DB2390Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2390Dialect.java similarity index 90% rename from hibernate-core/src/main/java/org/hibernate/dialect/DB2390Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2390Dialect.java index 8ddbdafc12c1..5db5fd6f961d 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/DB2390Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2390Dialect.java @@ -4,8 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; +import org.hibernate.dialect.DB2zDialect; import org.hibernate.dialect.identity.DB2zIdentityColumnSupport; import org.hibernate.dialect.identity.IdentityColumnSupport; import org.hibernate.dialect.pagination.FetchLimitHandler; @@ -24,7 +25,7 @@ * @deprecated Use {@link DB2zDialect} */ @Deprecated -public class DB2390Dialect extends DB2Dialect { +public class DB2390Dialect extends DB2LegacyDialect { private final int version; @@ -64,6 +65,6 @@ public LimitHandler getLimitHandler() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new DB2zIdentityColumnSupport(); + return DB2zIdentityColumnSupport.INSTANCE; } } diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/DB2390V8Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2390V8Dialect.java similarity index 92% rename from hibernate-core/src/main/java/org/hibernate/dialect/DB2390V8Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2390V8Dialect.java index d96792dd5685..6882665a570d 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/DB2390V8Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2390V8Dialect.java @@ -4,7 +4,7 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; /** diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/DB297Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB297Dialect.java similarity index 75% rename from hibernate-core/src/main/java/org/hibernate/dialect/DB297Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB297Dialect.java index 0970aa569d04..18b1f3d4067a 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/DB297Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB297Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for DB2 9.7. @@ -13,7 +15,7 @@ * @deprecated use {@code DB2Dialect(970)} */ @Deprecated -public class DB297Dialect extends DB2Dialect { +public class DB297Dialect extends DB2LegacyDialect { public DB297Dialect() { super( DatabaseVersion.make( 9, 7 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2LegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2LegacyDialect.java index 0043eb9d01cd..beaa8b582757 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2LegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2LegacyDialect.java @@ -11,7 +11,11 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; +import java.time.temporal.TemporalAccessor; +import java.util.Calendar; +import java.util.Date; import java.util.List; +import java.util.TimeZone; import org.hibernate.LockOptions; import 
org.hibernate.boot.model.FunctionContributions; @@ -96,11 +100,16 @@ import static org.hibernate.type.SqlTypes.DECIMAL; import static org.hibernate.type.SqlTypes.NUMERIC; import static org.hibernate.type.SqlTypes.SQLXML; +import static org.hibernate.type.SqlTypes.TIME; import static org.hibernate.type.SqlTypes.TIMESTAMP_WITH_TIMEZONE; import static org.hibernate.type.SqlTypes.TIME_WITH_TIMEZONE; import static org.hibernate.type.SqlTypes.TINYINT; import static org.hibernate.type.SqlTypes.VARBINARY; import static org.hibernate.type.SqlTypes.VARCHAR; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsDate; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsLocalTime; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithMillis; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithNanos; /** * A {@linkplain Dialect SQL dialect} for DB2. @@ -187,6 +196,7 @@ protected String columnType(int sqlTypeCode) { return "clob"; case TIMESTAMP_WITH_TIMEZONE: return "timestamp($p)"; + case TIME: case TIME_WITH_TIMEZONE: return "time"; case BINARY: @@ -295,8 +305,6 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.octetLength(); functionFactory.ascii(); functionFactory.char_chr(); - functionFactory.trunc(); -// functionFactory.truncate(); functionFactory.insert(); functionFactory.characterLength_length( SqlAstNodeRenderingMode.DEFAULT ); functionFactory.stddev(); @@ -312,6 +320,7 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.varPopSamp(); functionFactory.varianceSamp(); functionFactory.dateTrunc(); + functionFactory.trunc_dateTrunc(); } else { // Before version 11, the position function required the use of the code units @@ -327,7 +336,7 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.stddevSamp_sumCount(); 
functionContributions.getFunctionRegistry().registerAlternateKey( "var_pop", "variance" ); functionFactory.varSamp_sumCount(); - functionFactory.dateTrunc_trunc(); + functionFactory.trunc_dateTrunc_trunc(); } functionFactory.addYearsMonthsDaysHoursMinutesSeconds(); @@ -417,9 +426,37 @@ public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalT if ( getDB2Version().isBefore( 11 ) ) { return DB2Dialect.timestampdiffPatternV10( unit, fromTemporalType, toTemporalType ); } - StringBuilder pattern = new StringBuilder(); - boolean castFrom = fromTemporalType != TemporalType.TIMESTAMP && !unit.isDateUnit(); - boolean castTo = toTemporalType != TemporalType.TIMESTAMP && !unit.isDateUnit(); + final StringBuilder pattern = new StringBuilder(); + final String fromExpression; + final String toExpression; + if ( unit.isDateUnit() ) { + fromExpression = "?2"; + toExpression = "?3"; + } + else { + switch ( fromTemporalType ) { + case DATE: + fromExpression = "cast(?2 as timestamp)"; + break; + case TIME: + fromExpression = "timestamp('1970-01-01',?2)"; + break; + default: + fromExpression = "?2"; + break; + } + switch ( toTemporalType ) { + case DATE: + toExpression = "cast(?3 as timestamp)"; + break; + case TIME: + toExpression = "timestamp('1970-01-01',?3)"; + break; + default: + toExpression = "?3"; + break; + } + } switch ( unit ) { case NATIVE: case NANOSECOND: @@ -435,26 +472,24 @@ public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalT default: pattern.append( "?1s_between(" ); } - if ( castTo ) { - pattern.append( "cast(?3 as timestamp)" ); - } - else { - pattern.append( "?3" ); - } + pattern.append( toExpression ); pattern.append( ',' ); - if ( castFrom ) { - pattern.append( "cast(?2 as timestamp)" ); - } - else { - pattern.append( "?2" ); - } + pattern.append( fromExpression ); pattern.append( ')' ); switch ( unit ) { case NATIVE: - pattern.append( "+(microsecond(?3)-microsecond(?2))/1e6)" ); + pattern.append( 
"+(microsecond("); + pattern.append( toExpression ); + pattern.append(")-microsecond("); + pattern.append( fromExpression ); + pattern.append("))/1e6)" ); break; case NANOSECOND: - pattern.append( "*1e9+(microsecond(?3)-microsecond(?2))*1e3)" ); + pattern.append( "*1e9+(microsecond("); + pattern.append( toExpression ); + pattern.append(")-microsecond("); + pattern.append( fromExpression ); + pattern.append("))*1e3)" ); break; case MONTH: pattern.append( ')' ); @@ -469,19 +504,24 @@ public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalT @Override public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, IntervalType intervalType) { final StringBuilder pattern = new StringBuilder(); - final boolean castTo; + final String timestampExpression; if ( unit.isDateUnit() ) { - castTo = temporalType == TemporalType.TIME; - } - else { - castTo = temporalType == TemporalType.DATE; - } - if (castTo) { - pattern.append("cast(?3 as timestamp)"); + if ( temporalType == TemporalType.TIME ) { + timestampExpression = "timestamp('1970-01-01',?3)"; + } + else { + timestampExpression = "?3"; + } } else { - pattern.append("?3"); + if ( temporalType == TemporalType.DATE ) { + timestampExpression = "cast(?3 as timestamp)"; + } + else { + timestampExpression = "?3"; + } } + pattern.append(timestampExpression); pattern.append("+("); // DB2 supports temporal arithmetic. 
See https://www.ibm.com/support/knowledgecenter/en/SSEPGG_9.7.0/com.ibm.db2.luw.sql.ref.doc/doc/r0023457.html switch (unit) { @@ -504,6 +544,83 @@ public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, return pattern.toString(); } + @Override + public void appendDateTimeLiteral( + SqlAppender appender, + TemporalAccessor temporalAccessor, + TemporalType precision, + TimeZone jdbcTimeZone) { + switch ( precision ) { + case DATE: + appender.appendSql( "date '" ); + appendAsDate( appender, temporalAccessor ); + appender.appendSql( '\'' ); + break; + case TIME: + appender.appendSql( "time '" ); + appendAsLocalTime( appender, temporalAccessor ); + appender.appendSql( '\'' ); + break; + case TIMESTAMP: + appender.appendSql( "timestamp '" ); + appendAsTimestampWithNanos( appender, temporalAccessor, false, jdbcTimeZone ); + appender.appendSql( '\'' ); + break; + default: + throw new IllegalArgumentException(); + } + } + + @Override + public void appendDateTimeLiteral(SqlAppender appender, Date date, TemporalType precision, TimeZone jdbcTimeZone) { + switch ( precision ) { + case DATE: + appender.appendSql( "date '" ); + appendAsDate( appender, date ); + appender.appendSql( '\'' ); + break; + case TIME: + appender.appendSql( "time '" ); + appendAsLocalTime( appender, date ); + appender.appendSql( '\'' ); + break; + case TIMESTAMP: + appender.appendSql( "timestamp '" ); + appendAsTimestampWithNanos( appender, date, jdbcTimeZone ); + appender.appendSql( '\'' ); + break; + default: + throw new IllegalArgumentException(); + } + } + + @Override + public void appendDateTimeLiteral( + SqlAppender appender, + Calendar calendar, + TemporalType precision, + TimeZone jdbcTimeZone) { + switch ( precision ) { + case DATE: + appender.appendSql( "date '" ); + appendAsDate( appender, calendar ); + appender.appendSql( '\'' ); + break; + case TIME: + appender.appendSql( "time '" ); + appendAsLocalTime( appender, calendar ); + appender.appendSql( '\'' ); + break; + 
case TIMESTAMP: + appender.appendSql( "timestamp '" ); + appendAsTimestampWithMillis( appender, calendar, jdbcTimeZone ); + appender.appendSql( '\'' ); + break; + default: + throw new IllegalArgumentException(); + } + } + @Override public String getLowercaseFunction() { return getDB2Version().isBefore( 9, 7 ) ? "lcase" : super.getLowercaseFunction(); @@ -817,7 +934,7 @@ protected SqlAstTranslator buildTranslator( @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new DB2IdentityColumnSupport(); + return DB2IdentityColumnSupport.INSTANCE; } @Override @@ -895,6 +1012,12 @@ public String extractPattern(TemporalUnit unit) { return "dayofweek(?2)"; case QUARTER: return "quarter(?2)"; + case EPOCH: + if ( getDB2Version().isBefore( 11 ) ) { + return timestampdiffPattern( TemporalUnit.SECOND, TemporalType.TIMESTAMP, TemporalType.TIMESTAMP ) + .replace( "?2", "'1970-01-01 00:00:00'" ) + .replace( "?3", "?2" ); + } } return super.extractPattern( unit ); } @@ -940,4 +1063,20 @@ public String getTruncateTableStatement(String tableName) { public String getCreateUserDefinedTypeExtensionsString() { return " instantiable mode db2sql"; } + + /** + * The more "standard" syntax is {@code rid_bit(alias)} but here we use {@code alias.rowid}. + *

+ * There is also an alternative {@code rid()} of type {@code bigint}, but it cannot be used + * with partitioning. + */ + @Override + public String rowId(String rowId) { + return "rowid"; + } + + @Override + public int rowIdSqlType() { + return VARBINARY; + } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2iLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2iLegacyDialect.java index 3e1f867ea445..4f4414979359 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2iLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2iLegacyDialect.java @@ -9,6 +9,7 @@ import org.hibernate.boot.model.FunctionContributions; import org.hibernate.dialect.DatabaseVersion; import org.hibernate.dialect.function.CommonFunctionFactory; +import org.hibernate.dialect.function.DB2SubstringFunction; import org.hibernate.dialect.identity.DB2390IdentityColumnSupport; import org.hibernate.dialect.identity.DB2IdentityColumnSupport; import org.hibernate.dialect.identity.IdentityColumnSupport; @@ -57,9 +58,14 @@ public DB2iLegacyDialect(DatabaseVersion version) { @Override public void initializeFunctionRegistry(FunctionContributions functionContributions) { - super.initializeFunctionRegistry(functionContributions); + super.initializeFunctionRegistry( functionContributions ); + // DB2 for i doesn't allow code units: https://www.ibm.com/docs/en/i/7.1.0?topic=functions-substring + functionContributions.getFunctionRegistry().register( + "substring", + new DB2SubstringFunction( false, functionContributions.getTypeConfiguration() ) + ); if ( getVersion().isSameOrAfter( 7, 2 ) ) { - CommonFunctionFactory functionFactory = new CommonFunctionFactory(functionContributions); + CommonFunctionFactory functionFactory = new CommonFunctionFactory( functionContributions ); functionFactory.listagg( null ); 
functionFactory.inverseDistributionOrderedSetAggregates(); functionFactory.hypotheticalOrderedSetAggregates_windowEmulation(); @@ -110,7 +116,7 @@ public SequenceSupport getSequenceSupport() { @Override public String getQuerySequencesString() { if ( getVersion().isSameOrAfter(7,3) ) { - return "select distinct sequence_name from qsys2.syssequences " + + return "select distinct sequence_schema as seqschema, sequence_name as seqname, START, minimum_value as minvalue, maximum_value as maxvalue, increment from qsys2.syssequences " + "where current_schema='*LIBL' and sequence_schema in (select schema_name from qsys2.library_list_info) " + "or sequence_schema=current_schema"; } @@ -128,8 +134,8 @@ public LimitHandler getLimitHandler() { @Override public IdentityColumnSupport getIdentityColumnSupport() { return getVersion().isSameOrAfter(7, 3) - ? new DB2IdentityColumnSupport() - : new DB2390IdentityColumnSupport(); + ? DB2IdentityColumnSupport.INSTANCE + : DB2390IdentityColumnSupport.INSTANCE; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2zLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2zLegacyDialect.java index 9a805b1d8844..72e3fb000078 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2zLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DB2zLegacyDialect.java @@ -37,7 +37,9 @@ import java.util.List; +import static org.hibernate.type.SqlTypes.ROWID; import static org.hibernate.type.SqlTypes.TIMESTAMP_WITH_TIMEZONE; +import static org.hibernate.type.SqlTypes.TIME_WITH_TIMEZONE; /** * An SQL dialect for DB2 for z/OS, previously known as known as Db2 UDB for z/OS and Db2 UDB for z/OS and OS/390. 
@@ -74,9 +76,13 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio @Override protected String columnType(int sqlTypeCode) { - if ( sqlTypeCode == TIMESTAMP_WITH_TIMEZONE && getVersion().isAfter( 10 ) ) { - // See https://www.ibm.com/support/knowledgecenter/SSEPEK_10.0.0/wnew/src/tpc/db2z_10_timestamptimezone.html - return "timestamp with time zone"; + if ( getVersion().isAfter( 10 ) ) { + switch ( sqlTypeCode ) { + case TIME_WITH_TIMEZONE: + case TIMESTAMP_WITH_TIMEZONE: + // See https://www.ibm.com/support/knowledgecenter/SSEPEK_10.0.0/wnew/src/tpc/db2z_10_timestamptimezone.html + return "timestamp with time zone"; + } } return super.columnType( sqlTypeCode ); } @@ -140,7 +146,7 @@ public LimitHandler getLimitHandler() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new DB2390IdentityColumnSupport(); + return DB2390IdentityColumnSupport.INSTANCE; } @Override @@ -160,14 +166,7 @@ public boolean supportsRecursiveCTE() { @Override public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, IntervalType intervalType) { - StringBuilder pattern = new StringBuilder(); - final boolean castTo; - if ( unit.isDateUnit() ) { - castTo = temporalType == TemporalType.TIME; - } - else { - castTo = temporalType == TemporalType.DATE; - } + final StringBuilder pattern = new StringBuilder(); pattern.append("add_"); switch (unit) { case NATIVE: @@ -185,12 +184,24 @@ public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, pattern.append("?1"); } pattern.append("s("); - if (castTo) { - pattern.append("cast(?3 as timestamp)"); + final String timestampExpression; + if ( unit.isDateUnit() ) { + if ( temporalType == TemporalType.TIME ) { + timestampExpression = "timestamp('1970-01-01',?3)"; + } + else { + timestampExpression = "?3"; + } } else { - pattern.append("?3"); + if ( temporalType == TemporalType.DATE ) { + timestampExpression = "cast(?3 as timestamp)"; + } + else { + 
timestampExpression = "?3"; + } } + pattern.append(timestampExpression); pattern.append(","); switch (unit) { case NANOSECOND: @@ -219,4 +230,23 @@ protected SqlAstTranslator buildTranslator( } }; } + + // I speculate that this is a correct implementation of rowids for DB2 for z/OS, + // just on the basis of the DB2 docs, but I currently have no way to test it + // Note that the implementation inherited from DB2Dialect for LUW will not work! + + @Override + public String rowId(String rowId) { + return rowId == null || rowId.isEmpty() ? "rowid_" : rowId; + } + + @Override + public int rowIdSqlType() { + return ROWID; + } + + @Override + public String getRowIdColumnString(String rowId) { + return rowId( rowId ) + " rowid not null generated always"; + } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacyDialect.java index 1cf8dc240629..0125723c42f9 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacyDialect.java @@ -92,8 +92,10 @@ import static org.hibernate.type.SqlTypes.NCLOB; import static org.hibernate.type.SqlTypes.NUMERIC; import static org.hibernate.type.SqlTypes.NVARCHAR; +import static org.hibernate.type.SqlTypes.TIME; import static org.hibernate.type.SqlTypes.TIMESTAMP; import static org.hibernate.type.SqlTypes.TIMESTAMP_WITH_TIMEZONE; +import static org.hibernate.type.SqlTypes.TIME_WITH_TIMEZONE; import static org.hibernate.type.SqlTypes.TINYINT; import static org.hibernate.type.SqlTypes.VARBINARY; import static org.hibernate.type.SqlTypes.VARCHAR; @@ -162,6 +164,10 @@ protected String columnType(int sqlTypeCode) { case NCLOB: return "clob"; + case TIME: + case TIME_WITH_TIMEZONE: + return "time"; + case TIMESTAMP: case TIMESTAMP_WITH_TIMEZONE: return 
"timestamp"; @@ -179,23 +185,55 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR int varcharDdlTypeCapacity = 32_672; ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( VARBINARY, columnType( LONG32VARBINARY ), columnType( VARBINARY ), this ) + CapacityDependentDdlType.builder( + VARBINARY, + isLob( LONG32VARBINARY ) + ? CapacityDependentDdlType.LobKind.BIGGEST_LOB + : CapacityDependentDdlType.LobKind.NONE, + columnType( LONG32VARBINARY ), + columnType( VARBINARY ), + this + ) .withTypeCapacity( varcharDdlTypeCapacity, columnType( VARBINARY ) ) .build() ); ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( VARCHAR, columnType( LONG32VARCHAR ), columnType( VARCHAR ), this ) + CapacityDependentDdlType.builder( + VARCHAR, + isLob( LONG32VARCHAR ) + ? CapacityDependentDdlType.LobKind.BIGGEST_LOB + : CapacityDependentDdlType.LobKind.NONE, + columnType( LONG32VARCHAR ), + columnType( VARCHAR ), + this + ) .withTypeCapacity( varcharDdlTypeCapacity, columnType( VARCHAR ) ) .build() ); ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( NVARCHAR, columnType( LONG32VARCHAR ), columnType( NVARCHAR ), this ) + CapacityDependentDdlType.builder( + NVARCHAR, + isLob( LONG32NVARCHAR ) + ? CapacityDependentDdlType.LobKind.BIGGEST_LOB + : CapacityDependentDdlType.LobKind.NONE, + columnType( LONG32VARCHAR ), + columnType( NVARCHAR ), + this + ) .withTypeCapacity( varcharDdlTypeCapacity, columnType( NVARCHAR ) ) .build() ); ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( BINARY, columnType( LONG32VARBINARY ), columnType( VARBINARY ), this ) + CapacityDependentDdlType.builder( + BINARY, + isLob( LONG32VARBINARY ) + ? 
CapacityDependentDdlType.LobKind.BIGGEST_LOB + : CapacityDependentDdlType.LobKind.NONE, + columnType( LONG32VARBINARY ), + columnType( VARBINARY ), + this + ) .withTypeCapacity( 254, "char($l) for bit data" ) .withTypeCapacity( varcharDdlTypeCapacity, columnType( VARBINARY ) ) .build() @@ -203,13 +241,29 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR // This is the maximum size for the CHAR datatype on Derby ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( CHAR, columnType( LONG32VARCHAR ), columnType( CHAR ), this ) + CapacityDependentDdlType.builder( + CHAR, + isLob( LONG32VARCHAR ) + ? CapacityDependentDdlType.LobKind.BIGGEST_LOB + : CapacityDependentDdlType.LobKind.NONE, + columnType( LONG32VARCHAR ), + columnType( CHAR ), + this + ) .withTypeCapacity( 254, columnType( CHAR ) ) .withTypeCapacity( getMaxVarcharLength(), columnType( VARCHAR ) ) .build() ); ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( NCHAR, columnType( LONG32NVARCHAR ), columnType( NCHAR ), this ) + CapacityDependentDdlType.builder( + NCHAR, + isLob( LONG32NVARCHAR ) + ? 
CapacityDependentDdlType.LobKind.BIGGEST_LOB + : CapacityDependentDdlType.LobKind.NONE, + columnType( LONG32NVARCHAR ), + columnType( NCHAR ), + this + ) .withTypeCapacity( 254, columnType( NCHAR ) ) .withTypeCapacity( getMaxVarcharLength(), columnType( NVARCHAR ) ) .build() @@ -383,6 +437,8 @@ public String extractPattern(TemporalUnit unit) { return "(({fn timestampdiff(sql_tsi_day,date(char(year(?2),4)||'-01-01'),{fn timestampadd(sql_tsi_day,{fn timestampdiff(sql_tsi_day,{d '1753-01-01'},?2)}/7*7,{d '1753-01-04'})})}+7)/7)"; case QUARTER: return "((month(?2)+2)/3)"; + case EPOCH: + return "{fn timestampdiff(sql_tsi_second,{ts '1970-01-01 00:00:00'},?2)}"; default: return "?1(?2)"; } @@ -581,7 +637,7 @@ public LimitHandler getLimitHandler() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new DB2IdentityColumnSupport(); + return DB2IdentityColumnSupport.INSTANCE; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacySqlAstTranslator.java index a17629b0c117..deee312b080e 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyLegacySqlAstTranslator.java @@ -210,7 +210,7 @@ public void visitInListPredicate(InListPredicate inListPredicate) { if ( inListPredicate.isNegated() ) { appendSql( " not" ); } - appendSql( " in(" ); + appendSql( " in (" ); renderCommaSeparated( listExpressions ); appendSql( CLOSE_PARENTHESIS ); } @@ -263,6 +263,11 @@ private boolean supportsOffsetFetchClause() { return getDialect().getVersion().isSameOrAfter( 10, 5 ); } + @Override + protected boolean supportsJoinInMutationStatementSubquery() { + return false; + } + @Override public void visitBinaryArithmeticExpression(BinaryArithmeticExpression 
arithmeticExpression) { final BinaryArithmeticOperator operator = arithmeticExpression.getOperator(); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenFiveDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenFiveDialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenFiveDialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenFiveDialect.java index 5896ae4b75ed..2527f37c44db 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenFiveDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenFiveDialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * Dialect for Derby/Cloudscape 10.5 @@ -15,7 +17,7 @@ * @deprecated use {@code DerbyDialect(1050)} */ @Deprecated -public class DerbyTenFiveDialect extends DerbyDialect { +public class DerbyTenFiveDialect extends DerbyLegacyDialect { public DerbyTenFiveDialect() { super( DatabaseVersion.make( 10, 5 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenSevenDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenSevenDialect.java similarity index 74% rename from hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenSevenDialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenSevenDialect.java index c1248dc68c88..6e7313085933 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenSevenDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenSevenDialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser 
General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * Dialect for Derby 10.7 @@ -14,7 +16,7 @@ * @deprecated use {@code DerbyDialect(1070)} */ @Deprecated -public class DerbyTenSevenDialect extends DerbyDialect { +public class DerbyTenSevenDialect extends DerbyLegacyDialect { public DerbyTenSevenDialect() { super( DatabaseVersion.make( 10, 7 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenSixDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenSixDialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenSixDialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenSixDialect.java index dcc7b5855303..6309dd31aef5 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/DerbyTenSixDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/DerbyTenSixDialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * Dialect for Derby/Cloudscape 10.6 @@ -15,7 +17,7 @@ * @deprecated use {@code DerbyDialect(1060)} */ @Deprecated -public class DerbyTenSixDialect extends DerbyDialect { +public class DerbyTenSixDialect extends DerbyLegacyDialect { public DerbyTenSixDialect() { super( DatabaseVersion.make( 10, 6 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdDialect.java index 26cdce8790e8..c476aa5ac9d5 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdDialect.java @@ -150,7 +150,7 @@ protected String columnType(int sqlTypeCode) { case TIMESTAMP: return "timestamp"; case TIME_WITH_TIMEZONE: - return getVersion().isBefore( 4, 0 ) ? "time" : super.columnType( sqlTypeCode ); + return getVersion().isBefore( 4, 0 ) ? "time" : "time with time zone"; case TIMESTAMP_WITH_TIMEZONE: return getVersion().isBefore( 4, 0 ) ? "timestamp" : "timestamp with time zone"; case BINARY: @@ -625,7 +625,7 @@ public void appendBooleanValueString(SqlAppender appender, boolean bool) { public IdentityColumnSupport getIdentityColumnSupport() { return getVersion().isBefore( 3, 0 ) ? 
super.getIdentityColumnSupport() - : new FirebirdIdentityColumnSupport(); + : FirebirdIdentityColumnSupport.INSTANCE; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdSqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdSqlAstTranslator.java index 63a0f0f9c29c..4eaa45abe66c 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdSqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/FirebirdSqlAstTranslator.java @@ -238,7 +238,7 @@ public void visitInListPredicate(InListPredicate inListPredicate) { if ( inListPredicate.isNegated() ) { appendSql( " not" ); } - appendSql( " in(" ); + appendSql( " in (" ); renderCommaSeparated( listExpressions ); appendSql( CLOSE_PARENTHESIS ); } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacyDialect.java index 8f2f044033fe..ee5012beb864 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacyDialect.java @@ -17,6 +17,7 @@ import java.util.TimeZone; import org.hibernate.PessimisticLockException; +import org.hibernate.QueryTimeoutException; import org.hibernate.boot.model.FunctionContributions; import org.hibernate.boot.model.TypeContributions; import org.hibernate.dialect.DatabaseVersion; @@ -75,8 +76,12 @@ import org.hibernate.tool.schema.extract.internal.SequenceInformationExtractorLegacyImpl; import org.hibernate.tool.schema.extract.internal.SequenceInformationExtractorNoOpImpl; import org.hibernate.tool.schema.extract.spi.SequenceInformationExtractor; -import org.hibernate.type.descriptor.jdbc.InstantJdbcType; +import 
org.hibernate.type.descriptor.jdbc.H2FormatJsonJdbcType; import org.hibernate.type.descriptor.jdbc.JdbcType; +import org.hibernate.type.descriptor.jdbc.TimeAsTimestampWithTimeZoneJdbcType; +import org.hibernate.type.descriptor.jdbc.TimeUtcAsJdbcTimeJdbcType; +import org.hibernate.type.descriptor.jdbc.TimeUtcAsOffsetTimeJdbcType; +import org.hibernate.type.descriptor.jdbc.TimestampUtcAsInstantJdbcType; import org.hibernate.type.descriptor.jdbc.UUIDJdbcType; import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; import org.hibernate.type.descriptor.sql.internal.DdlTypeImpl; @@ -87,6 +92,7 @@ import static org.hibernate.query.sqm.TemporalUnit.SECOND; import static org.hibernate.type.SqlTypes.ARRAY; +import static org.hibernate.type.SqlTypes.BIGINT; import static org.hibernate.type.SqlTypes.BINARY; import static org.hibernate.type.SqlTypes.CHAR; import static org.hibernate.type.SqlTypes.DECIMAL; @@ -94,6 +100,7 @@ import static org.hibernate.type.SqlTypes.FLOAT; import static org.hibernate.type.SqlTypes.GEOMETRY; import static org.hibernate.type.SqlTypes.INTERVAL_SECOND; +import static org.hibernate.type.SqlTypes.JSON; import static org.hibernate.type.SqlTypes.LONG32NVARCHAR; import static org.hibernate.type.SqlTypes.LONG32VARBINARY; import static org.hibernate.type.SqlTypes.LONG32VARCHAR; @@ -102,6 +109,8 @@ import static org.hibernate.type.SqlTypes.NVARCHAR; import static org.hibernate.type.SqlTypes.OTHER; import static org.hibernate.type.SqlTypes.TIMESTAMP_UTC; +import static org.hibernate.type.SqlTypes.TIMESTAMP_WITH_TIMEZONE; +import static org.hibernate.type.SqlTypes.TIME_WITH_TIMEZONE; import static org.hibernate.type.SqlTypes.UUID; import static org.hibernate.type.SqlTypes.VARBINARY; import static org.hibernate.type.SqlTypes.VARCHAR; @@ -110,7 +119,6 @@ import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTime; import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithMillis; import static 
org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithNanos; -import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithMillis; /** * A legacy {@linkplain Dialect SQL dialect} for H2. @@ -199,6 +207,12 @@ public boolean supportsStandardArrays() { return getVersion().isSameOrAfter( 2 ); } + @Override + public boolean useArrayForMultiValuedParameters() { + // Performance is worse than the in-predicate version + return false; + } + @Override protected String columnType(int sqlTypeCode) { switch ( sqlTypeCode ) { @@ -206,6 +220,9 @@ protected String columnType(int sqlTypeCode) { // which caused problems for schema update tool case NUMERIC: return getVersion().isBefore( 2 ) ? columnType( DECIMAL ) : super.columnType( sqlTypeCode ); + // Support was only added in 2.0 + case TIME_WITH_TIMEZONE: + return getVersion().isBefore( 2 ) ? columnType( TIMESTAMP_WITH_TIMEZONE ) : super.columnType( sqlTypeCode ); case NCHAR: return columnType( CHAR ); case NVARCHAR: @@ -248,6 +265,9 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR if ( getVersion().isSameOrAfter( 1, 4, 198 ) ) { ddlTypeRegistry.addDescriptor( new DdlTypeImpl( INTERVAL_SECOND, "interval second($p,$s)", this ) ); } + if ( getVersion().isSameOrAfter( 1, 4, 200 ) ) { + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "json", this ) ); + } } } @@ -258,13 +278,24 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration() .getJdbcTypeRegistry(); - jdbcTypeRegistry.addDescriptor( TIMESTAMP_UTC, InstantJdbcType.INSTANCE ); + if ( getVersion().isBefore( 2 ) ) { + // Support for TIME_WITH_TIMEZONE was only added in 2.0 + jdbcTypeRegistry.addDescriptor( TimeAsTimestampWithTimeZoneJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptor( TimeUtcAsJdbcTimeJdbcType.INSTANCE ); + } + else { + jdbcTypeRegistry.addDescriptor( 
TimeUtcAsOffsetTimeJdbcType.INSTANCE ); + } + jdbcTypeRegistry.addDescriptor( TIMESTAMP_UTC, TimestampUtcAsInstantJdbcType.INSTANCE ); if ( getVersion().isSameOrAfter( 1, 4, 197 ) ) { jdbcTypeRegistry.addDescriptorIfAbsent( UUIDJdbcType.INSTANCE ); } if ( getVersion().isSameOrAfter( 1, 4, 198 ) ) { jdbcTypeRegistry.addDescriptorIfAbsent( H2DurationIntervalSecondJdbcType.INSTANCE ); } + if ( getVersion().isSameOrAfter( 1, 4, 200 ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( H2FormatJsonJdbcType.INSTANCE ); + } } @Override @@ -309,8 +340,7 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio if ( useLocalTime ) { functionFactory.localtimeLocaltimestamp(); } - functionFactory.trunc(); -// functionFactory.truncate(); + functionFactory.trunc_dateTrunc(); functionFactory.dateTrunc(); functionFactory.bitLength(); functionFactory.octetLength(); @@ -393,6 +423,9 @@ public JdbcType resolveSqlTypeDescriptor( if ( "GEOMETRY".equals( columnTypeName ) ) { return jdbcTypeRegistry.getDescriptor( GEOMETRY ); } + else if ( "JSON".equals( columnTypeName ) ) { + return jdbcTypeRegistry.getDescriptor( JSON ); + } break; } return super.resolveSqlTypeDescriptor( columnTypeName, jdbcTypeCode, precision, scale, jdbcTypeRegistry ); @@ -733,6 +766,8 @@ public SQLExceptionConversionDelegate buildSQLExceptionConversionDelegate() { // NULL not allowed for column [90006-145] final String constraintName = getViolatedConstraintNameExtractor().extractConstraintName(sqlException); return new ConstraintViolationException(message, sqlException, sql, constraintName); + case 57014: + return new QueryTimeoutException( message, sqlException, sql ); } return null; @@ -868,4 +903,14 @@ public String getDisableConstraintsStatement() { public UniqueDelegate getUniqueDelegate() { return uniqueDelegate; } + + @Override + public String rowId(String rowId) { + return "_rowid_"; + } + + @Override + public int rowIdSqlType() { + return BIGINT; + } } diff --git 
a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacySqlAstTranslator.java index c13ae75cc571..9e2764053128 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/H2LegacySqlAstTranslator.java @@ -302,4 +302,9 @@ private boolean supportsOffsetFetchClausePercentWithTies() { // Introduction of PERCENT support https://github.com/h2database/h2database/commit/f45913302e5f6ad149155a73763c0c59d8205849 return getDialect().getVersion().isSameOrAfter( 1, 4, 198 ); } + + @Override + protected boolean supportsJoinInMutationStatementSubquery() { + return false; + } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/HSQLLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/HSQLLegacyDialect.java index bfb5e17cdd31..055797ab3313 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/HSQLLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/HSQLLegacyDialect.java @@ -100,9 +100,11 @@ public class HSQLLegacyDialect extends Dialect { org.hibernate.community.dialect.HSQLLegacyDialect.class.getName() ); private final UniqueDelegate uniqueDelegate = new CreateTableUniqueDelegate( this ); + private final HSQLIdentityColumnSupport identityColumnSupport; public HSQLLegacyDialect(DialectResolutionInfo info) { super( info ); + this.identityColumnSupport = new HSQLIdentityColumnSupport( getVersion() ); } public HSQLLegacyDialect() { @@ -111,6 +113,7 @@ public HSQLLegacyDialect() { public HSQLLegacyDialect(DatabaseVersion version) { super( version.isSame( 1, 8 ) ? 
reflectedVersion( version ) : version ); + this.identityColumnSupport = new HSQLIdentityColumnSupport( getVersion() ); } @Override @@ -199,8 +202,7 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.degrees(); functionFactory.log10(); functionFactory.rand(); - functionFactory.trunc(); -// functionFactory.truncate(); + functionFactory.trunc_dateTrunc_trunc(); functionFactory.pi(); functionFactory.soundex(); functionFactory.reverse(); @@ -771,7 +773,7 @@ public boolean requiresFloatCastingOfIntegerDivision() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new HSQLIdentityColumnSupport( this.getVersion() ); + return identityColumnSupport; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/InformixDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/InformixDialect.java index dc614f4a7fef..888c09af4679 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/InformixDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/InformixDialect.java @@ -518,7 +518,7 @@ public UniqueDelegate getUniqueDelegate() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new InformixIdentityColumnSupport(); + return InformixIdentityColumnSupport.INSTANCE; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/IngresDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/IngresDialect.java index a8a9a9d3421a..de5b0cbae608 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/IngresDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/IngresDialect.java @@ -251,8 +251,8 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio 
functionFactory.octetLength(); functionFactory.repeat(); functionFactory.trim2(); - functionFactory.trunc(); -// functionFactory.truncate(); + functionFactory.dateTrunc(); + functionFactory.trunc_dateTrunc(); functionFactory.initcap(); functionFactory.yearMonthDay(); functionFactory.hourMinuteSecond(); @@ -269,7 +269,6 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.sysdate(); functionFactory.position(); functionFactory.format_dateFormat(); - functionFactory.dateTrunc(); functionFactory.bitLength_pattern( "octet_length(hex(?1))*4" ); final BasicType integerType = functionContributions.getTypeConfiguration().getBasicTypeRegistry() @@ -400,10 +399,10 @@ public LimitHandler getLimitHandler() { @Override public IdentityColumnSupport getIdentityColumnSupport() { if ( getVersion().isSameOrAfter( 10 ) ) { - return new Ingres10IdentityColumnSupport(); + return Ingres10IdentityColumnSupport.INSTANCE; } else if ( getVersion().isSameOrAfter( 9, 3 ) ) { - return new Ingres9IdentityColumnSupport(); + return Ingres9IdentityColumnSupport.INSTANCE; } else { return super.getIdentityColumnSupport(); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/MariaDB102Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB102Dialect.java similarity index 71% rename from hibernate-core/src/main/java/org/hibernate/dialect/MariaDB102Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB102Dialect.java index 9712b5eec385..962b848d7da1 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/MariaDB102Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB102Dialect.java @@ -4,13 +4,15 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * @deprecated use {@code MariaDBDialect(1020)} */ @Deprecated -public class MariaDB102Dialect extends MariaDBDialect { +public class MariaDB102Dialect extends MariaDBLegacyDialect { public MariaDB102Dialect() { super( DatabaseVersion.make( 10, 2 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/MariaDB10Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB10Dialect.java similarity index 71% rename from hibernate-core/src/main/java/org/hibernate/dialect/MariaDB10Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB10Dialect.java index 1de6e150d23c..c09ac0755551 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/MariaDB10Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB10Dialect.java @@ -4,13 +4,15 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * @deprecated use {@code MariaDBDialect(1000)} */ @Deprecated -public class MariaDB10Dialect extends MariaDBDialect { +public class MariaDB10Dialect extends MariaDBLegacyDialect { public MariaDB10Dialect() { super( DatabaseVersion.make( 10 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/MariaDB53Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB53Dialect.java similarity index 73% rename from hibernate-core/src/main/java/org/hibernate/dialect/MariaDB53Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB53Dialect.java index 91bd81dccc0d..2e102401676d 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/MariaDB53Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDB53Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * @author Vlad Mihalcea @@ -12,7 +14,7 @@ * @deprecated use {@code MariaDBDialect(530)} */ @Deprecated -public class MariaDB53Dialect extends MariaDBDialect { +public class MariaDB53Dialect extends MariaDBLegacyDialect { public MariaDB53Dialect() { super( DatabaseVersion.make( 5, 3 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacyDialect.java index 14dfbfa03e32..ac2551f316d5 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacyDialect.java @@ -34,8 +34,10 @@ import org.hibernate.sql.exec.spi.JdbcOperation; import org.hibernate.tool.schema.extract.internal.SequenceInformationExtractorMariaDBDatabaseImpl; import org.hibernate.tool.schema.extract.spi.SequenceInformationExtractor; +import org.hibernate.type.SqlTypes; import org.hibernate.type.StandardBasicTypes; import org.hibernate.type.descriptor.jdbc.JdbcType; +import org.hibernate.type.descriptor.jdbc.JsonJdbcType; import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; import org.hibernate.type.descriptor.sql.internal.DdlTypeImpl; import org.hibernate.type.descriptor.sql.spi.DdlTypeRegistry; @@ -141,9 +143,11 @@ public JdbcType resolveSqlTypeDescriptor( @Override public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { + final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration().getJdbcTypeRegistry(); + // Make sure we register the JSON type descriptor before calling super, because MariaDB does not need casting + jdbcTypeRegistry.addDescriptorIfAbsent( SqlTypes.JSON, JsonJdbcType.INSTANCE ); + 
super.contributeTypes( typeContributions, serviceRegistry ); - final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration() - .getJdbcTypeRegistry(); if ( getVersion().isSameOrAfter( 10, 7 ) ) { jdbcTypeRegistry.addDescriptorIfAbsent( VarcharUUIDJdbcType.INSTANCE ); } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacySqlAstTranslator.java index 3690c32d6e83..cc4b556692a1 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MariaDBLegacySqlAstTranslator.java @@ -242,7 +242,7 @@ private boolean supportsWindowFunctions() { @Override public void visitCastTarget(CastTarget castTarget) { - String sqlType = MySQLSqlAstTranslator.getSqlType( castTarget, dialect ); + String sqlType = MySQLSqlAstTranslator.getSqlType( castTarget, getSessionFactory() ); if ( sqlType != null ) { appendSql( sqlType ); } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MimerSQLDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MimerSQLDialect.java index a24c3644ccd7..65eb290918d7 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MimerSQLDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MimerSQLDialect.java @@ -42,6 +42,7 @@ import static org.hibernate.type.SqlTypes.CHAR; import static org.hibernate.type.SqlTypes.CLOB; import static org.hibernate.type.SqlTypes.LONG32NVARCHAR; +import static org.hibernate.type.SqlTypes.LONG32VARBINARY; import static org.hibernate.type.SqlTypes.LONG32VARCHAR; import static org.hibernate.type.SqlTypes.NCHAR; import static org.hibernate.type.SqlTypes.NCLOB; @@ -113,7 +114,15 @@ 
protected void registerColumnTypes(TypeContributions typeContributions, ServiceR //Mimer CHARs are ASCII!! ddlTypeRegistry.addDescriptor( - CapacityDependentDdlType.builder( VARCHAR, columnType( LONG32VARCHAR ), "nvarchar(" + getMaxNVarcharLength() + ")", this ) + CapacityDependentDdlType.builder( + VARCHAR, + isLob( LONG32VARCHAR ) ? + CapacityDependentDdlType.LobKind.BIGGEST_LOB : + CapacityDependentDdlType.LobKind.NONE, + columnType( LONG32VARCHAR ), + "nvarchar(" + getMaxNVarcharLength() + ")", + this + ) .withTypeCapacity( getMaxNVarcharLength(), columnType( VARCHAR ) ) .build() ); @@ -158,7 +167,7 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.soundex(); functionFactory.octetLength(); functionFactory.bitLength(); - functionFactory.truncate(); + functionFactory.trunc_truncate(); functionFactory.repeat(); functionFactory.pad_repeat(); functionFactory.dayofweekmonthyear(); @@ -328,6 +337,6 @@ public boolean useInputStreamToInsertBlob() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new MimerSQLIdentityColumnSupport(); + return MimerSQLIdentityColumnSupport.INSTANCE; } } diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/MySQL55Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQL55Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/MySQL55Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQL55Dialect.java index bb69bf262950..b93ef50e4830 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/MySQL55Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQL55Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for MySQL 5.5.x specific features. @@ -14,7 +16,7 @@ * @deprecated use {@code MySQLDialect(550)} */ @Deprecated -public class MySQL55Dialect extends MySQLDialect { +public class MySQL55Dialect extends MySQLLegacyDialect { public MySQL55Dialect() { super( DatabaseVersion.make( 5, 5 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/MySQL5Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQL5Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/MySQL5Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQL5Dialect.java index 115431e4d7e3..30d8c79dc1fa 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/MySQL5Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQL5Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for MySQL 5.x specific features. 
@@ -14,7 +16,7 @@ * @deprecated use {@code MySQLDialect(500)} */ @Deprecated -public class MySQL5Dialect extends MySQLDialect { +public class MySQL5Dialect extends MySQLLegacyDialect { public MySQL5Dialect() { super( DatabaseVersion.make( 5 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacyDialect.java index 0cd1309022bb..9e99f03254d1 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacyDialect.java @@ -21,6 +21,7 @@ import org.hibernate.dialect.Dialect; import org.hibernate.dialect.InnoDBStorageEngine; import org.hibernate.dialect.MyISAMStorageEngine; +import org.hibernate.dialect.MySQLCastingJsonJdbcType; import org.hibernate.dialect.MySQLServerConfiguration; import org.hibernate.dialect.MySQLStorageEngine; import org.hibernate.dialect.Replacer; @@ -310,7 +311,7 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR final CapacityDependentDdlType.Builder varcharBuilder = CapacityDependentDdlType.builder( VARCHAR, - columnType( CLOB ), + CapacityDependentDdlType.LobKind.BIGGEST_LOB,columnType( CLOB ), "char", this ) @@ -323,7 +324,7 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR final CapacityDependentDdlType.Builder nvarcharBuilder = CapacityDependentDdlType.builder( NVARCHAR, - columnType( NCLOB ), + CapacityDependentDdlType.LobKind.BIGGEST_LOB,columnType( NCLOB ), "char", this ) @@ -336,7 +337,7 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR final CapacityDependentDdlType.Builder varbinaryBuilder = CapacityDependentDdlType.builder( VARBINARY, - columnType( BLOB ), + CapacityDependentDdlType.LobKind.BIGGEST_LOB,columnType( BLOB ), "binary", this ) @@ -630,11 
+631,10 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { super.contributeTypes( typeContributions, serviceRegistry ); - final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration() - .getJdbcTypeRegistry(); + final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration().getJdbcTypeRegistry(); if ( getMySQLVersion().isSameOrAfter( 5, 7 ) ) { - jdbcTypeRegistry.addDescriptorIfAbsent( SqlTypes.JSON, JsonJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( SqlTypes.JSON, MySQLCastingJsonJdbcType.INSTANCE ); } // MySQL requires a custom binder for binding untyped nulls with the NULL type @@ -1117,7 +1117,7 @@ public IdentifierHelper buildIdentifierHelper(IdentifierHelperBuilder builder, D @Override public IdentityColumnSupport getIdentityColumnSupport() { - return new MySQLIdentityColumnSupport(); + return MySQLIdentityColumnSupport.INSTANCE; } @Override @@ -1251,7 +1251,7 @@ private String withTimeout(String lockString, int timeout) { case LockOptions.WAIT_FOREVER: return lockString; default: - return supportsWait() ? lockString + " wait " + timeout : lockString; + return supportsWait() ? 
lockString + " wait " + getTimeoutInSeconds( timeout ) : lockString; } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacySqlAstTranslator.java index 3d5cd991db1f..59ed599c9b04 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/MySQLLegacySqlAstTranslator.java @@ -6,7 +6,7 @@ */ package org.hibernate.community.dialect; -import org.hibernate.dialect.MySQLDialect; +import org.hibernate.dialect.DialectDelegateWrapper; import org.hibernate.dialect.MySQLSqlAstTranslator; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.query.sqm.ComparisonOperator; @@ -244,13 +244,13 @@ protected String getFromDualForSelectOnly() { } @Override - public MySQLDialect getDialect() { - return (MySQLDialect) super.getDialect(); + public MySQLLegacyDialect getDialect() { + return (MySQLLegacyDialect) DialectDelegateWrapper.extractRealDialect( super.getDialect() ); } @Override public void visitCastTarget(CastTarget castTarget) { - String sqlType = MySQLSqlAstTranslator.getSqlType( castTarget, getDialect() ); + String sqlType = MySQLSqlAstTranslator.getSqlType( castTarget, getSessionFactory() ); if ( sqlType != null ) { appendSql( sqlType ); } diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/Oracle10gDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle10gDialect.java similarity index 79% rename from hibernate-core/src/main/java/org/hibernate/dialect/Oracle10gDialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle10gDialect.java index 4c3447872e5a..7028ce91a4f8 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/Oracle10gDialect.java +++ 
b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle10gDialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * A dialect specifically for use with Oracle 10g. @@ -18,7 +20,7 @@ * @deprecated use {@code OracleDialect(10)} */ @Deprecated -public class Oracle10gDialect extends OracleDialect { +public class Oracle10gDialect extends OracleLegacyDialect { public Oracle10gDialect() { super( DatabaseVersion.make( 10 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/Oracle8iDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle8iDialect.java similarity index 73% rename from hibernate-core/src/main/java/org/hibernate/dialect/Oracle8iDialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle8iDialect.java index cf3b2c4a8a22..936f042312ff 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/Oracle8iDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle8iDialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * A dialect for Oracle 8i databases. 
@@ -12,7 +14,7 @@ * @deprecated use {@code OracleDialect(8)} */ @Deprecated -public class Oracle8iDialect extends OracleDialect { +public class Oracle8iDialect extends OracleLegacyDialect { public Oracle8iDialect() { super( DatabaseVersion.make( 8 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/Oracle9iDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle9iDialect.java similarity index 79% rename from hibernate-core/src/main/java/org/hibernate/dialect/Oracle9iDialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle9iDialect.java index daa6709ec8b8..d0791f3bdf36 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/Oracle9iDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Oracle9iDialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * A dialect for Oracle 9i databases. 
@@ -17,7 +19,7 @@ * @deprecated use {@code OracleDialect(9)} */ @Deprecated -public class Oracle9iDialect extends OracleDialect { +public class Oracle9iDialect extends OracleLegacyDialect { public Oracle9iDialect() { super( DatabaseVersion.make( 9 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacyDialect.java index 08df7598c222..6b70c0209767 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacyDialect.java @@ -11,7 +11,11 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.util.List; import java.util.Locale; +import java.util.TimeZone; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -21,14 +25,14 @@ import org.hibernate.boot.model.TypeContributions; import org.hibernate.cfg.Environment; import org.hibernate.dialect.BooleanDecoder; +import org.hibernate.dialect.DmlTargetColumnQualifierSupport; import org.hibernate.dialect.DatabaseVersion; import org.hibernate.dialect.Dialect; -import org.hibernate.dialect.OracleArrayJdbcType; import org.hibernate.dialect.OracleBooleanJdbcType; +import org.hibernate.dialect.OracleJdbcHelper; import org.hibernate.dialect.OracleJsonJdbcType; +import org.hibernate.dialect.OracleReflectionStructJdbcType; import org.hibernate.dialect.OracleTypes; -import org.hibernate.dialect.OracleStructJdbcType; -import org.hibernate.dialect.OracleTypesHelper; import org.hibernate.dialect.OracleXmlJdbcType; import org.hibernate.dialect.Replacer; import org.hibernate.dialect.RowLockStrategy; @@ -38,6 +42,7 @@ import org.hibernate.dialect.function.CommonFunctionFactory; import 
org.hibernate.dialect.function.ModeStatsModeEmulation; import org.hibernate.dialect.function.NvlCoalesceEmulation; +import org.hibernate.dialect.function.OracleTruncFunction; import org.hibernate.dialect.identity.IdentityColumnSupport; import org.hibernate.dialect.identity.Oracle12cIdentityColumnSupport; import org.hibernate.dialect.pagination.LegacyOracleLimitHandler; @@ -105,7 +110,9 @@ import jakarta.persistence.TemporalType; +import static java.lang.String.join; import static org.hibernate.exception.spi.TemplatedViolatedConstraintNameExtractor.extractUsingTemplate; +import static org.hibernate.internal.util.StringHelper.isEmpty; import static org.hibernate.query.sqm.TemporalUnit.DAY; import static org.hibernate.query.sqm.TemporalUnit.HOUR; import static org.hibernate.query.sqm.TemporalUnit.MINUTE; @@ -134,6 +141,7 @@ import static org.hibernate.type.SqlTypes.TINYINT; import static org.hibernate.type.SqlTypes.VARBINARY; import static org.hibernate.type.SqlTypes.VARCHAR; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithNanos; /** * A {@linkplain Dialect SQL dialect} for Oracle 8i and above. @@ -152,6 +160,13 @@ public class OracleLegacyDialect extends Dialect { public static final String PREFER_LONG_RAW = "hibernate.dialect.oracle.prefer_long_raw"; + private static final String yqmSelect = + "( TRUNC(%2$s, 'MONTH') + NUMTOYMINTERVAL(%1$s, 'MONTH') + ( LEAST( EXTRACT( DAY FROM %2$s ), EXTRACT( DAY FROM LAST_DAY( TRUNC(%2$s, 'MONTH') + NUMTOYMINTERVAL(%1$s, 'MONTH') ) ) ) - 1 ) )"; + + private static final String ADD_YEAR_EXPRESSION = String.format( yqmSelect, "?2*12", "?3" ); + private static final String ADD_QUARTER_EXPRESSION = String.format( yqmSelect, "?2*3", "?3" ); + private static final String ADD_MONTH_EXPRESSION = String.format( yqmSelect, "?2", "?3" ); + private final LimitHandler limitHandler = supportsFetchClause( FetchClauseType.ROWS_ONLY ) ? 
Oracle12LimitHandler.INSTANCE : new LegacyOracleLimitHandler( getVersion() ); @@ -185,7 +200,6 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.cosh(); functionFactory.sinh(); functionFactory.tanh(); - functionFactory.trunc(); functionFactory.log(); functionFactory.log10_log(); functionFactory.soundex(); @@ -255,6 +269,11 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio "mode", new ModeStatsModeEmulation( typeConfiguration ) ); + functionContributions.getFunctionRegistry().register( + "trunc", + new OracleTruncFunction( functionContributions.getTypeConfiguration() ) + ); + functionContributions.getFunctionRegistry().registerAlternateKey( "truncate", "trunc" ); } @Override @@ -311,6 +330,11 @@ public String currentTimestampWithTimeZone() { return getVersion().isBefore( 9 ) ? currentTimestamp() : "current_timestamp"; } + @Override + public boolean supportsInsertReturningGeneratedKeys() { + return getVersion().isSameOrAfter( 12 ); + } + /** * Oracle doesn't have any sort of {@link Types#BOOLEAN} @@ -443,6 +467,8 @@ public String extractPattern(TemporalUnit unit) { return "to_number(to_char(?2,'MI'))"; case SECOND: return "to_number(to_char(?2,'SS'))"; + case EPOCH: + return "trunc((cast(?2 at time zone 'UTC' as date) - date '1970-1-1')*86400)"; default: return super.extractPattern(unit); } @@ -450,150 +476,135 @@ public String extractPattern(TemporalUnit unit) { @Override public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, IntervalType intervalType) { - StringBuilder pattern = new StringBuilder(); - pattern.append("(?3+"); + final StringBuilder pattern = new StringBuilder(); switch ( unit ) { case YEAR: + pattern.append( ADD_YEAR_EXPRESSION ); + break; case QUARTER: + pattern.append( ADD_QUARTER_EXPRESSION ); + break; case MONTH: - pattern.append("numtoyminterval"); + pattern.append( ADD_MONTH_EXPRESSION ); break; case WEEK: + if ( temporalType != 
TemporalType.DATE ) { + pattern.append( "(?3+numtodsinterval((?2)*7,'day'))" ); + } + else { + pattern.append( "(?3+(?2)" ).append( unit.conversionFactor( DAY, this ) ).append( ")" ); + } + break; case DAY: + if ( temporalType == TemporalType.DATE ) { + pattern.append( "(?3+(?2))" ); + break; + } case HOUR: case MINUTE: case SECOND: - case NANOSECOND: - case NATIVE: - pattern.append("numtodsinterval"); + pattern.append( "(?3+numtodsinterval(?2,'?1'))" ); break; - default: - throw new SemanticException(unit + " is not a legal field"); - } - pattern.append("("); - switch ( unit ) { case NANOSECOND: - case QUARTER: - case WEEK: - pattern.append("("); + pattern.append( "(?3+numtodsinterval((?2)/1e9,'second'))" ); break; - } - pattern.append("?2"); - switch ( unit ) { - case QUARTER: - pattern.append(")*3"); - break; - case WEEK: - pattern.append(")*7"); - break; - case NANOSECOND: - pattern.append(")/1e9"); - break; - } - pattern.append(",'"); - switch ( unit ) { - case QUARTER: - pattern.append("month"); - break; - case WEEK: - pattern.append("day"); - break; - case NANOSECOND: case NATIVE: - pattern.append("second"); + pattern.append( "(?3+numtodsinterval(?2,'second'))" ); break; default: - pattern.append("?1"); + throw new SemanticException( unit + " is not a legal field" ); } - pattern.append("')"); - pattern.append(")"); return pattern.toString(); } @Override - public String timestampdiffPattern( - TemporalUnit unit, - TemporalType fromTemporalType, TemporalType toTemporalType) { - StringBuilder pattern = new StringBuilder(); - boolean timestamp = toTemporalType == TemporalType.TIMESTAMP || fromTemporalType == TemporalType.TIMESTAMP; - switch (unit) { + public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalType, TemporalType toTemporalType) { + final StringBuilder pattern = new StringBuilder(); + final boolean hasTimePart = toTemporalType != TemporalType.DATE || fromTemporalType != TemporalType.DATE; + switch ( unit ) { case YEAR: - 
extractField(pattern, YEAR, unit); + extractField( pattern, YEAR, unit ); break; case QUARTER: case MONTH: - pattern.append("("); - extractField(pattern, YEAR, unit); - pattern.append("+"); - extractField(pattern, MONTH, unit); - pattern.append(")"); + pattern.append( "(" ); + extractField( pattern, YEAR, unit ); + pattern.append( "+" ); + extractField( pattern, MONTH, unit ); + pattern.append( ")" ); break; - case WEEK: case DAY: - extractField(pattern, DAY, unit); - break; - case HOUR: - pattern.append("("); - extractField(pattern, DAY, unit); - if (timestamp) { - pattern.append("+"); - extractField(pattern, HOUR, unit); + if ( hasTimePart ) { + pattern.append( "(cast(?3 as date)-cast(?2 as date))" ); + } + else { + pattern.append( "(?3-?2)" ); } - pattern.append(")"); break; + case WEEK: case MINUTE: - pattern.append("("); - extractField(pattern, DAY, unit); - if (timestamp) { - pattern.append("+"); - extractField(pattern, HOUR, unit); - pattern.append("+"); - extractField(pattern, MINUTE, unit); + case SECOND: + case HOUR: + if ( hasTimePart ) { + pattern.append( "((cast(?3 as date)-cast(?2 as date))" ); + } + else { + pattern.append( "((?3-?2)" ); } - pattern.append(")"); + pattern.append( TemporalUnit.DAY.conversionFactor(unit ,this ) ); + pattern.append( ")" ); break; case NATIVE: case NANOSECOND: - case SECOND: - pattern.append("("); - extractField(pattern, DAY, unit); - if (timestamp) { - pattern.append("+"); - extractField(pattern, HOUR, unit); - pattern.append("+"); - extractField(pattern, MINUTE, unit); - pattern.append("+"); - extractField(pattern, SECOND, unit); + if ( hasTimePart ) { + if ( supportsLateral() ) { + pattern.append( "(select extract(day from t.i)" ).append( TemporalUnit.DAY.conversionFactor( unit, this ) ) + .append( "+extract(hour from t.i)" ).append( TemporalUnit.HOUR.conversionFactor( unit, this ) ) + .append( "+extract(minute from t.i)" ).append( MINUTE.conversionFactor( unit, this ) ) + .append( "+extract(second from t.i)" 
).append( SECOND.conversionFactor( unit, this ) ) + .append( " from(select ?3-?2 i from dual)t" ); + } + else { + pattern.append( "(" ); + extractField( pattern, DAY, unit ); + pattern.append( "+" ); + extractField( pattern, HOUR, unit ); + pattern.append( "+" ); + extractField( pattern, MINUTE, unit ); + pattern.append( "+" ); + extractField( pattern, SECOND, unit ); + } } - pattern.append(")"); + else { + pattern.append( "((?3-?2)" ); + pattern.append( TemporalUnit.DAY.conversionFactor( unit, this ) ); + } + pattern.append( ")" ); break; default: - throw new SemanticException("unrecognized field: " + unit); + throw new SemanticException( "unrecognized field: " + unit ); } return pattern.toString(); } - private void extractField( - StringBuilder pattern, - TemporalUnit unit, TemporalUnit toUnit) { - pattern.append("extract("); - pattern.append( translateExtractField(unit) ); - pattern.append(" from (?3-?2) "); - switch (unit) { + private void extractField(StringBuilder pattern, TemporalUnit unit, TemporalUnit toUnit) { + pattern.append( "extract(" ); + pattern.append( translateExtractField( unit ) ); + pattern.append( " from (?3-?2)" ); + switch ( unit ) { case YEAR: case MONTH: - pattern.append("year to month"); + pattern.append( " year(9) to month" ); break; case DAY: case HOUR: case MINUTE: case SECOND: - pattern.append("day to second"); break; default: - throw new SemanticException(unit + " is not a legal field"); + throw new SemanticException( unit + " is not a legal field" ); } - pattern.append(")"); + pattern.append( ")" ); pattern.append( unit.conversionFactor( toUnit, this ) ); } @@ -622,8 +633,9 @@ protected String columnType(int sqlTypeCode) { return "number($p,$s)"; case DATE: - case TIME: return "date"; + case TIME: + return getVersion().isBefore( 9 ) ? 
"date" : super.columnType( sqlTypeCode ); case TIMESTAMP: // the only difference between date and timestamp // on Oracle is that date has no fractional seconds @@ -722,7 +734,7 @@ public JdbcType resolveSqlTypeDescriptor( } //intentional fall-through: case Types.DECIMAL: - if ( scale == 0 ) { + if ( scale == 0 && precision != 0 ) { // Don't infer TINYINT or SMALLINT on Oracle, since the // range of values of a NUMBER(3,0) or NUMBER(5,0) just // doesn't really match naturally. @@ -772,7 +784,12 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry typeContributions.contributeJdbcType( OracleBooleanJdbcType.INSTANCE ); typeContributions.contributeJdbcType( OracleXmlJdbcType.INSTANCE ); - typeContributions.contributeJdbcType( OracleStructJdbcType.INSTANCE ); + if ( OracleJdbcHelper.isUsable( serviceRegistry ) ) { + typeContributions.contributeJdbcType( OracleJdbcHelper.getStructJdbcType( serviceRegistry ) ); + } + else { + typeContributions.contributeJdbcType( OracleReflectionStructJdbcType.INSTANCE ); + } if ( getVersion().isSameOrAfter( 12 ) ) { // account for Oracle's deprecated support for LONGVARBINARY @@ -797,7 +814,12 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry } } - typeContributions.contributeJdbcType( OracleArrayJdbcType.INSTANCE ); + if ( OracleJdbcHelper.isUsable( serviceRegistry ) ) { + typeContributions.contributeJdbcType( OracleJdbcHelper.getArrayJdbcType( serviceRegistry ) ); + } + else { + typeContributions.contributeJdbcType( OracleReflectionStructJdbcType.INSTANCE ); + } // Oracle requires a custom binder for binding untyped nulls with the NULL type typeContributions.contributeJdbcType( NullJdbcType.INSTANCE ); typeContributions.contributeJdbcType( ObjectNullAsNullTypeJdbcType.INSTANCE ); @@ -837,7 +859,7 @@ public String getNativeIdentifierGeneratorStrategy() { public IdentityColumnSupport getIdentityColumnSupport() { return getVersion().isBefore( 12 ) ? 
super.getIdentityColumnSupport() - : new Oracle12cIdentityColumnSupport(); + : Oracle12cIdentityColumnSupport.INSTANCE; } @Override @@ -979,7 +1001,7 @@ public SQLExceptionConversionDelegate buildSQLExceptionConversionDelegate() { @Override public int registerResultSetOutParameter(CallableStatement statement, int col) throws SQLException { // register the type of the out param - an Oracle specific type - statement.registerOutParameter( col, OracleTypesHelper.INSTANCE.getOracleCursorTypeSqlType() ); + statement.registerOutParameter( col, OracleTypes.CURSOR ); col++; return col; } @@ -1091,25 +1113,27 @@ public boolean useFollowOnLocking(String sql, QueryOptions queryOptions) { } @Override - public String getQueryHintString(String sql, String hints) { - String statementType = statementType(sql); - - final int pos = sql.indexOf( statementType ); - if ( pos > -1 ) { - final StringBuilder buffer = new StringBuilder( sql.length() + hints.length() + 8 ); - if ( pos > 0 ) { - buffer.append( sql, 0, pos ); - } - buffer - .append( statementType ) - .append( " /*+ " ) - .append( hints ) - .append( " */" ) - .append( sql.substring( pos + statementType.length() ) ); - sql = buffer.toString(); + public String getQueryHintString(String query, List hintList) { + if ( hintList.isEmpty() ) { + return query; + } + else { + final String hints = join( " ", hintList ); + return isEmpty( hints ) ? 
query : getQueryHintString( query, hints ); } + } - return sql; + @Override + public String getQueryHintString(String sql, String hints) { + final String statementType = statementType( sql ); + final int start = sql.indexOf( statementType ); + if ( start < 0 ) { + return sql; + } + else { + int end = start + statementType.length(); + return sql.substring( 0, end ) + " /*+ " + hints + " */" + sql.substring( end ); + } } @Override @@ -1146,14 +1170,15 @@ public boolean supportsPartitionBy() { return true; } - private String statementType(String sql) { - Matcher matcher = SQL_STATEMENT_TYPE_PATTERN.matcher( sql ); + private String statementType(String sql) { + final Matcher matcher = SQL_STATEMENT_TYPE_PATTERN.matcher( sql ); if ( matcher.matches() && matcher.groupCount() == 1 ) { return matcher.group(1); } - - throw new IllegalArgumentException( "Can't determine SQL statement type for statement: " + sql ); + else { + throw new IllegalArgumentException( "Can't determine SQL statement type for statement: " + sql ); + } } @Override @@ -1237,7 +1262,7 @@ private String withTimeout(String lockString, int timeout) { case LockOptions.WAIT_FOREVER: return lockString; default: - return supportsWait() ? lockString + " wait " + Math.round(timeout / 1e3f) : lockString; + return supportsWait() ? 
lockString + " wait " + getTimeoutInSeconds( timeout ) : lockString; } } @@ -1261,6 +1286,32 @@ public String getReadLockString(String aliases, int timeout) { return getWriteLockString( aliases, timeout ); } + @Override + public boolean supportsTemporalLiteralOffset() { + // Oracle *does* support offsets, but only + // in the ANSI syntax, not in the JDBC + // escape-based syntax, which we use in + // almost all circumstances (see below) + return false; + } + + @Override + public void appendDateTimeLiteral(SqlAppender appender, TemporalAccessor temporalAccessor, TemporalType precision, TimeZone jdbcTimeZone) { + // we usually use the JDBC escape-based syntax + // because we want to let the JDBC driver handle + // TIME (a concept which does not exist in Oracle) + // but for the special case of timestamps with an + // offset we need to use the ANSI syntax + if ( precision == TemporalType.TIMESTAMP && temporalAccessor.isSupported( ChronoField.OFFSET_SECONDS ) ) { + appender.appendSql( "timestamp '" ); + appendAsTimestampWithNanos( appender, temporalAccessor, true, jdbcTimeZone, false ); + appender.appendSql( '\'' ); + } + else { + super.appendDateTimeLiteral( appender, temporalAccessor, precision, jdbcTimeZone ); + } + } + @Override public void appendDatetimeFormat(SqlAppender appender, String format) { // Unlike other databases, Oracle requires an explicit reset for the fm modifier, @@ -1366,7 +1417,7 @@ public ResultSet getResultSet(CallableStatement statement, int position) throws @Override public int registerResultSetOutParameter(CallableStatement statement, String name) throws SQLException { - statement.registerOutParameter( name, OracleTypesHelper.INSTANCE.getOracleCursorTypeSqlType() ); + statement.registerOutParameter( name, OracleTypes.CURSOR ); return 1; } @@ -1417,4 +1468,14 @@ public UniqueDelegate getUniqueDelegate() { public String getCreateUserDefinedTypeKindString() { return "object"; } + + @Override + public String rowId(String rowId) { + return 
"rowid"; + } + + @Override + public DmlTargetColumnQualifierSupport getDmlTargetColumnQualifierSupport() { + return DmlTargetColumnQualifierSupport.TABLE_ALIAS; + } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacySqlAstTranslator.java index 04a3f1c42f87..576dac85689c 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/OracleLegacySqlAstTranslator.java @@ -6,11 +6,14 @@ */ package org.hibernate.community.dialect; +import java.util.ArrayList; import java.util.List; -import org.hibernate.LockMode; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.internal.util.collections.Stack; +import org.hibernate.metamodel.mapping.EmbeddableValuedModelPart; +import org.hibernate.metamodel.mapping.EntityIdentifierMapping; +import org.hibernate.metamodel.mapping.EntityMappingType; import org.hibernate.metamodel.mapping.JdbcMappingContainer; import org.hibernate.query.IllegalQueryOperationException; import org.hibernate.query.sqm.ComparisonOperator; @@ -24,7 +27,6 @@ import org.hibernate.sql.ast.tree.cte.CteMaterialization; import org.hibernate.sql.ast.tree.expression.CaseSearchedExpression; import org.hibernate.sql.ast.tree.expression.ColumnReference; -import org.hibernate.sql.ast.tree.expression.AggregateColumnWriteExpression; import org.hibernate.sql.ast.tree.expression.Expression; import org.hibernate.sql.ast.tree.expression.FunctionExpression; import org.hibernate.sql.ast.tree.expression.Literal; @@ -32,13 +34,16 @@ import org.hibernate.sql.ast.tree.expression.SqlTuple; import org.hibernate.sql.ast.tree.expression.SqlTupleContainer; import org.hibernate.sql.ast.tree.expression.Summarization; +import 
org.hibernate.sql.ast.tree.from.FromClause; import org.hibernate.sql.ast.tree.from.FunctionTableReference; -import org.hibernate.sql.ast.tree.from.NamedTableReference; import org.hibernate.sql.ast.tree.from.QueryPartTableReference; +import org.hibernate.sql.ast.tree.from.TableGroup; import org.hibernate.sql.ast.tree.from.UnionTableGroup; import org.hibernate.sql.ast.tree.from.ValuesTableReference; import org.hibernate.sql.ast.tree.insert.InsertSelectStatement; import org.hibernate.sql.ast.tree.insert.Values; +import org.hibernate.sql.ast.tree.predicate.InSubQueryPredicate; +import org.hibernate.sql.ast.tree.predicate.Predicate; import org.hibernate.sql.ast.tree.select.QueryGroup; import org.hibernate.sql.ast.tree.select.QueryPart; import org.hibernate.sql.ast.tree.select.QuerySpec; @@ -46,6 +51,7 @@ import org.hibernate.sql.ast.tree.select.SortSpecification; import org.hibernate.sql.ast.tree.update.Assignment; import org.hibernate.sql.exec.spi.JdbcOperation; +import org.hibernate.sql.results.internal.SqlSelectionImpl; import org.hibernate.type.SqlTypes; /** @@ -97,12 +103,6 @@ protected LockStrategy determineLockingStrategy( Boolean followOnLocking) { LockStrategy strategy = super.determineLockingStrategy( querySpec, forUpdateClause, followOnLocking ); final boolean followOnLockingDisabled = Boolean.FALSE.equals( followOnLocking ); - if ( strategy != LockStrategy.FOLLOW_ON && querySpec.hasSortSpecifications() ) { - if ( followOnLockingDisabled ) { - throw new IllegalQueryOperationException( "Locking with ORDER BY is not supported" ); - } - strategy = LockStrategy.FOLLOW_ON; - } // Oracle also doesn't support locks with set operators // See https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_10002.htm#i2066346 if ( strategy != LockStrategy.FOLLOW_ON && isPartOfQueryGroup() ) { @@ -117,29 +117,12 @@ protected LockStrategy determineLockingStrategy( } strategy = LockStrategy.FOLLOW_ON; } - if ( strategy != LockStrategy.FOLLOW_ON && useOffsetFetchClause( 
querySpec ) && !isRowsOnlyFetchClauseType( querySpec ) ) { + if ( strategy != LockStrategy.FOLLOW_ON && needsLockingWrapper( querySpec ) && !canApplyLockingWrapper( querySpec ) ) { if ( followOnLockingDisabled ) { - throw new IllegalQueryOperationException( "Locking with FETCH is not supported" ); + throw new IllegalQueryOperationException( "Locking with OFFSET/FETCH is not supported" ); } strategy = LockStrategy.FOLLOW_ON; } - if ( strategy != LockStrategy.FOLLOW_ON ) { - final boolean hasOffset; - if ( querySpec.isRoot() && hasLimit() && getLimit().getFirstRow() != null ) { - hasOffset = true; - // We must record that the generated SQL depends on the fact that there is an offset - addAppliedParameterBinding( getOffsetParameter(), null ); - } - else { - hasOffset = querySpec.getOffsetClauseExpression() != null; - } - if ( hasOffset ) { - if ( followOnLockingDisabled ) { - throw new IllegalQueryOperationException( "Locking with OFFSET is not supported" ); - } - strategy = LockStrategy.FOLLOW_ON; - } - } return strategy; } @@ -166,7 +149,7 @@ protected boolean supportsNestedSubqueryCorrelation() { protected boolean shouldEmulateFetchClause(QueryPart queryPart) { // Check if current query part is already row numbering to avoid infinite recursion - if (getQueryPartForRowNumbering() == queryPart) { + if ( getQueryPartForRowNumbering() == queryPart ) { return false; } final boolean hasLimit = queryPart.isRoot() && hasLimit() || queryPart.getFetchClauseExpression() != null @@ -176,77 +159,12 @@ protected boolean shouldEmulateFetchClause(QueryPart queryPart) { } // Even if Oracle supports the OFFSET/FETCH clause, there are conditions where we still want to use the ROWNUM pagination if ( supportsOffsetFetchClause() ) { - // When the query has no sort specifications and offset, we want to use the ROWNUM pagination as that is a special locking case - return !queryPart.hasSortSpecifications() && !hasOffset( queryPart ) - // Workaround an Oracle bug, segmentation fault for 
insert queries with a plain query group and fetch clause - || queryPart instanceof QueryGroup && getClauseStack().isEmpty() && getStatement() instanceof InsertSelectStatement; + // Workaround an Oracle bug, segmentation fault for insert queries with a plain query group and fetch clause + return queryPart instanceof QueryGroup && getClauseStack().isEmpty() && getStatement() instanceof InsertSelectStatement; } return true; } - @Override - protected FetchClauseType getFetchClauseTypeForRowNumbering(QueryPart queryPart) { - final FetchClauseType fetchClauseType = super.getFetchClauseTypeForRowNumbering( queryPart ); - final boolean hasOffset; - if ( queryPart.isRoot() && hasLimit() ) { - hasOffset = getLimit().getFirstRow() != null; - } - else { - hasOffset = queryPart.getOffsetClauseExpression() != null; - } - if ( queryPart instanceof QuerySpec && !hasOffset && fetchClauseType == FetchClauseType.ROWS_ONLY ) { - // We return null here, because in this particular case, we render a special rownum query - // which can be seen in #emulateFetchOffsetWithWindowFunctions - // Note that we also build upon this in #visitOrderBy - return null; - } - return fetchClauseType; - } - - @Override - protected void emulateFetchOffsetWithWindowFunctions( - QueryPart queryPart, - Expression offsetExpression, - Expression fetchExpression, - FetchClauseType fetchClauseType, - boolean emulateFetchClause) { - if ( queryPart instanceof QuerySpec && offsetExpression == null && fetchClauseType == FetchClauseType.ROWS_ONLY ) { - // Special case for Oracle to support locking along with simple max results paging - final QuerySpec querySpec = (QuerySpec) queryPart; - withRowNumbering( - querySpec, - true, // we need select aliases to avoid ORA-00918: column ambiguously defined - () -> { - appendSql( "select * from " ); - emulateFetchOffsetWithWindowFunctionsVisitQueryPart( querySpec ); - appendSql( " where rownum<=" ); - final Stack clauseStack = getClauseStack(); - clauseStack.push( Clause.WHERE 
); - try { - fetchExpression.accept( this ); - - // We render the FOR UPDATE clause in the outer query - clauseStack.pop(); - clauseStack.push( Clause.FOR_UPDATE ); - visitForUpdateClause( querySpec ); - } - finally { - clauseStack.pop(); - } - } - ); - } - else { - super.emulateFetchOffsetWithWindowFunctions( - queryPart, - offsetExpression, - fetchExpression, - fetchClauseType, - emulateFetchClause - ); - } - } - @Override protected void visitOrderBy(List sortSpecifications) { // If we have a query part for row numbering, there is no need to render the order by clause @@ -262,13 +180,49 @@ protected void visitOrderBy(List sortSpecifications) { final QuerySpec querySpec = (QuerySpec) queryPartForRowNumbering; if ( querySpec.getOffsetClauseExpression() == null && ( !querySpec.isRoot() || getOffsetParameter() == null ) ) { - // When rendering `rownum` for Oracle, we need to render the order by clause still - renderOrderBy( true, sortSpecifications ); + // When we enter here, we need to handle the special ROWNUM pagination + if ( hasGroupingOrDistinct( querySpec ) || querySpec.getFromClause().hasJoins() ) { + // When the query spec has joins, a group by, having or distinct clause, + // we just need to render the order by clause, because the query is wrapped + renderOrderBy( true, sortSpecifications ); + } + else { + // Otherwise we need to render the ROWNUM pagination predicate in here + final Predicate whereClauseRestrictions = querySpec.getWhereClauseRestrictions(); + if ( whereClauseRestrictions != null && !whereClauseRestrictions.isEmpty() ) { + appendSql( " and " ); + } + else { + appendSql( " where " ); + } + appendSql( "rownum<=" ); + final Stack clauseStack = getClauseStack(); + clauseStack.push( Clause.WHERE ); + try { + if ( querySpec.isRoot() && hasLimit() ) { + getLimitParameter().accept( this ); + } + else { + querySpec.getFetchClauseExpression().accept( this ); + } + } + finally { + clauseStack.pop(); + } + renderOrderBy( true, sortSpecifications ); + 
visitForUpdateClause( querySpec ); + } } } } } + private boolean hasGroupingOrDistinct(QuerySpec querySpec) { + return querySpec.getSelectClause().isDistinct() + || !querySpec.getGroupByClauseExpressions().isEmpty() + || querySpec.getHavingClauseRestrictions() != null; + } + @Override protected void visitValuesList(List valuesList) { if ( valuesList.size() < 2 ) { @@ -323,19 +277,158 @@ public void visitQueryGroup(QueryGroup queryGroup) { @Override public void visitQuerySpec(QuerySpec querySpec) { + final EntityIdentifierMapping identifierMappingForLockingWrapper = identifierMappingForLockingWrapper( querySpec ); + final Expression offsetExpression; + final Expression fetchExpression; + final FetchClauseType fetchClauseType; + if ( querySpec.isRoot() && hasLimit() ) { + prepareLimitOffsetParameters(); + offsetExpression = getOffsetParameter(); + fetchExpression = getLimitParameter(); + fetchClauseType = FetchClauseType.ROWS_ONLY; + } + else { + offsetExpression = querySpec.getOffsetClauseExpression(); + fetchExpression = querySpec.getFetchClauseExpression(); + fetchClauseType = querySpec.getFetchClauseType(); + } if ( shouldEmulateFetchClause( querySpec ) ) { - emulateFetchOffsetWithWindowFunctions( querySpec, true ); + if ( identifierMappingForLockingWrapper == null ) { + emulateFetchOffsetWithWindowFunctions( + querySpec, + offsetExpression, + fetchExpression, + fetchClauseType, + true + ); + } + else { + super.visitQuerySpec( + createLockingWrapper( + querySpec, + offsetExpression, + fetchExpression, + fetchClauseType, + identifierMappingForLockingWrapper + ) + ); + // Render the for update clause for the original query spec, because the locking wrapper is marked as non-root + visitForUpdateClause( querySpec ); + } } else { - super.visitQuerySpec( querySpec ); + if ( identifierMappingForLockingWrapper == null ) { + super.visitQuerySpec( querySpec ); + } + else { + super.visitQuerySpec( + createLockingWrapper( + querySpec, + offsetExpression, + fetchExpression, + 
fetchClauseType, + identifierMappingForLockingWrapper + ) + ); + // Render the for update clause for the original query spec, because the locking wrapper is marked as non-root + visitForUpdateClause( querySpec ); + } } } + private QuerySpec createLockingWrapper( + QuerySpec querySpec, + Expression offsetExpression, + Expression fetchExpression, + FetchClauseType fetchClauseType, + EntityIdentifierMapping identifierMappingForLockingWrapper) { + + final TableGroup rootTableGroup = querySpec.getFromClause().getRoots().get( 0 ); + final List idColumnReferences = new ArrayList<>( identifierMappingForLockingWrapper.getJdbcTypeCount() ); + identifierMappingForLockingWrapper.forEachSelectable( + 0, + (selectionIndex, selectableMapping) -> { + idColumnReferences.add( new ColumnReference( rootTableGroup.getPrimaryTableReference(), selectableMapping ) ); + } + ); + final Expression idExpression; + if ( identifierMappingForLockingWrapper instanceof EmbeddableValuedModelPart ) { + idExpression = new SqlTuple( idColumnReferences, identifierMappingForLockingWrapper ); + } + else { + idExpression = idColumnReferences.get( 0 ); + } + final QuerySpec subquery = new QuerySpec( false, 1 ); + for ( ColumnReference idColumnReference : idColumnReferences ) { + subquery.getSelectClause().addSqlSelection( new SqlSelectionImpl( idColumnReference ) ); + } + subquery.getFromClause().addRoot( rootTableGroup ); + subquery.applyPredicate( querySpec.getWhereClauseRestrictions() ); + if ( querySpec.hasSortSpecifications() ) { + for ( SortSpecification sortSpecification : querySpec.getSortSpecifications() ) { + subquery.addSortSpecification( sortSpecification ); + } + } + subquery.setOffsetClauseExpression( offsetExpression ); + subquery.setFetchClauseExpression( fetchExpression, fetchClauseType ); + + // Mark the query spec as non-root even if it might be the root, to avoid applying the pagination there + final QuerySpec lockingWrapper = new QuerySpec( false, 1 ); + 
lockingWrapper.getFromClause().addRoot( rootTableGroup ); + for ( SqlSelection sqlSelection : querySpec.getSelectClause().getSqlSelections() ) { + lockingWrapper.getSelectClause().addSqlSelection( sqlSelection ); + } + lockingWrapper.applyPredicate( new InSubQueryPredicate( idExpression, subquery, false ) ); + return lockingWrapper; + } + + private EntityIdentifierMapping identifierMappingForLockingWrapper(QuerySpec querySpec) { + // We only need a locking wrapper for very simple queries + if ( canApplyLockingWrapper( querySpec ) + // There must be the need for locking in this query + && needsLocking( querySpec ) + // The query uses some sort of pagination which makes the wrapper necessary + && needsLockingWrapper( querySpec ) + // The query may not have a group by, having and distinct clause, or use aggregate functions, + // as these features will force the use of follow-on locking + && querySpec.getGroupByClauseExpressions().isEmpty() + && querySpec.getHavingClauseRestrictions() == null + && !querySpec.getSelectClause().isDistinct() + && !hasAggregateFunctions( querySpec ) ) { + return ( (EntityMappingType) querySpec.getFromClause().getRoots().get( 0 ).getModelPart() ).getIdentifierMapping(); + } + return null; + } + + private boolean canApplyLockingWrapper(QuerySpec querySpec) { + final FromClause fromClause; + return querySpec.isRoot() + // Must have a single root with no joins for an entity type + && ( fromClause = querySpec.getFromClause() ).getRoots().size() == 1 + && !fromClause.hasJoins() + && fromClause.getRoots().get( 0 ).getModelPart() instanceof EntityMappingType; + } + + private boolean needsLockingWrapper(QuerySpec querySpec) { + return querySpec.getFetchClauseType() != FetchClauseType.ROWS_ONLY + || hasOffset( querySpec ) + || hasLimit( querySpec ); + } + @Override public void visitOffsetFetchClause(QueryPart queryPart) { if ( !isRowNumberingCurrentQueryPart() ) { if ( supportsOffsetFetchClause() ) { - renderOffsetFetchClause( queryPart, true ); + 
if ( getQueryPartStack().depth() > 1 && queryPart.hasSortSpecifications() + && getQueryPartStack().peek( 1 ) instanceof QueryGroup + && ( queryPart.isRoot() && !hasLimit() || !queryPart.hasOffsetOrFetchClause() ) ) { + // If the current query part has a query group parent, no offset/fetch clause, but an order by clause, + // then we must render "offset 0 rows" as that is needed for the SQL to be valid + appendSql( " offset 0 rows" ); + } + else { + renderOffsetFetchClause( queryPart, true ); + } } else { assertRowsOnlyFetchClauseType( queryPart ); @@ -505,14 +598,6 @@ private boolean supportsOffsetFetchClause() { return getDialect().supportsFetchClause( FetchClauseType.ROWS_ONLY ); } - @Override - protected boolean renderNamedTableReference(NamedTableReference tableReference, LockMode lockMode) { - appendSql( tableReference.getTableExpression() ); - registerAffectedTable( tableReference ); - renderTableReferenceIdentificationVariable( tableReference ); - return false; - } - @Override protected void visitSetAssignment(Assignment assignment) { final List columnReferences = assignment.getAssignable().getColumnReferences(); @@ -540,15 +625,4 @@ protected void visitSetAssignment(Assignment assignment) { assignment.getAssignedValue().accept( this ); } } - - @Override - public void visitColumnReference(ColumnReference columnReference) { - columnReference.appendReadExpression( this ); - } - - @Override - public void visitAggregateColumnWriteExpression(AggregateColumnWriteExpression aggregateColumnWriteExpression) { - aggregateColumnWriteExpression.appendWriteExpression( this, this ); - } - } diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL81Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL81Dialect.java similarity index 71% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL81Dialect.java rename to 
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL81Dialect.java index da9e266c1474..fe2bdb428805 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL81Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL81Dialect.java @@ -4,13 +4,15 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * @deprecated use {@code PostgreSQLDialect(810)} */ @Deprecated -public class PostgreSQL81Dialect extends PostgreSQLDialect { +public class PostgreSQL81Dialect extends PostgreSQLLegacyDialect { public PostgreSQL81Dialect() { super( DatabaseVersion.make( 8, 1 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL82Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL82Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL82Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL82Dialect.java index 7eb38d45d74c..08be54f7a5a7 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL82Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL82Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for Postgres 8.2 and later, adds support for "if exists" when dropping tables @@ -14,7 +16,7 @@ * @deprecated use {@code PostgreSQLDialect(820)} */ @Deprecated -public class PostgreSQL82Dialect extends PostgreSQLDialect { +public class PostgreSQL82Dialect extends PostgreSQLLegacyDialect { public PostgreSQL82Dialect() { super( DatabaseVersion.make( 8, 2 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL91Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL91Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL91Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL91Dialect.java index 376e87b19739..1e6843f4a437 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL91Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL91Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for Postgres 9.1 and later, @@ -15,7 +17,7 @@ * @deprecated use {@code PostgreSQLDialect(910)} */ @Deprecated -public class PostgreSQL91Dialect extends PostgreSQLDialect { +public class PostgreSQL91Dialect extends PostgreSQLLegacyDialect { public PostgreSQL91Dialect() { super( DatabaseVersion.make( 9, 1 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL92Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL92Dialect.java similarity index 77% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL92Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL92Dialect.java index 37bb81a8176b..7b5dc480d144 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL92Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL92Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for Postgres 9.2 and later, @@ -16,7 +18,7 @@ * @deprecated use {@code PostgreSQLDialect(920)} */ @Deprecated -public class PostgreSQL92Dialect extends PostgreSQLDialect { +public class PostgreSQL92Dialect extends PostgreSQLLegacyDialect { public PostgreSQL92Dialect() { super( DatabaseVersion.make( 9, 2 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL93Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL93Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL93Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL93Dialect.java index 6880b68ec439..02d3e66e1080 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL93Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL93Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL Dialect for PostgreSQL 9.3 and later. 
@@ -15,7 +17,7 @@ * @deprecated use {@code PostgreSQLDialect(810)} */ @Deprecated -public class PostgreSQL93Dialect extends PostgreSQLDialect { +public class PostgreSQL93Dialect extends PostgreSQLLegacyDialect { public PostgreSQL93Dialect() { super( DatabaseVersion.make( 9, 3 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL94Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL94Dialect.java similarity index 75% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL94Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL94Dialect.java index a9df798a3d69..a44141811208 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL94Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL94Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for Postgres 9.4 and later. 
@@ -13,7 +15,7 @@ * @deprecated use {@code PostgreSQLDialect(940)} */ @Deprecated -public class PostgreSQL94Dialect extends PostgreSQLDialect { +public class PostgreSQL94Dialect extends PostgreSQLLegacyDialect { public PostgreSQL94Dialect() { super( DatabaseVersion.make( 9, 4 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL95Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL95Dialect.java similarity index 75% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL95Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL95Dialect.java index d18912c683a1..d5b69eee2c59 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL95Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL95Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for Postgres 9.5 and later. 
@@ -13,7 +15,7 @@ * @deprecated use {@code PostgreSQLDialect(950)} */ @Deprecated -public class PostgreSQL95Dialect extends PostgreSQLDialect { +public class PostgreSQL95Dialect extends PostgreSQLLegacyDialect { public PostgreSQL95Dialect() { super( DatabaseVersion.make( 9, 5 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL9Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL9Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL9Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL9Dialect.java index 359aedfa7586..244ef0ea7dea 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/PostgreSQL9Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQL9Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect for Postgres 9 and later. 
@@ -15,7 +17,7 @@ * @deprecated use {@code PostgreSQLDialect(900)} */ @Deprecated -public class PostgreSQL9Dialect extends PostgreSQLDialect { +public class PostgreSQL9Dialect extends PostgreSQLLegacyDialect { public PostgreSQL9Dialect() { super( DatabaseVersion.make( 9 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQLLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQLLegacyDialect.java index 57cbfbdebb21..c9d608fbf621 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQLLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgreSQLLegacyDialect.java @@ -30,12 +30,12 @@ import org.hibernate.dialect.Dialect; import org.hibernate.dialect.NationalizationSupport; import org.hibernate.dialect.OracleDialect; +import org.hibernate.dialect.PgJdbcHelper; +import org.hibernate.dialect.PostgreSQLCastingInetJdbcType; +import org.hibernate.dialect.PostgreSQLCastingIntervalSecondJdbcType; +import org.hibernate.dialect.PostgreSQLCastingJsonJdbcType; import org.hibernate.dialect.PostgreSQLDriverKind; -import org.hibernate.dialect.PostgreSQLInetJdbcType; -import org.hibernate.dialect.PostgreSQLIntervalSecondJdbcType; -import org.hibernate.dialect.PostgreSQLJsonbJdbcType; -import org.hibernate.dialect.PostgreSQLPGObjectJdbcType; -import org.hibernate.dialect.PostgreSQLStructJdbcType; +import org.hibernate.dialect.PostgreSQLStructCastingJdbcType; import org.hibernate.dialect.Replacer; import org.hibernate.dialect.RowLockStrategy; import org.hibernate.dialect.SelectItemReferenceStrategy; @@ -44,6 +44,7 @@ import org.hibernate.dialect.aggregate.PostgreSQLAggregateSupport; import org.hibernate.dialect.function.CommonFunctionFactory; import org.hibernate.dialect.function.PostgreSQLMinMaxFunction; +import org.hibernate.dialect.function.PostgreSQLTruncFunction; import 
org.hibernate.dialect.function.PostgreSQLTruncRoundFunction; import org.hibernate.dialect.identity.IdentityColumnSupport; import org.hibernate.dialect.identity.PostgreSQLIdentityColumnSupport; @@ -90,7 +91,6 @@ import org.hibernate.type.descriptor.jdbc.ArrayJdbcType; import org.hibernate.type.descriptor.jdbc.BlobJdbcType; import org.hibernate.type.descriptor.jdbc.ClobJdbcType; -import org.hibernate.type.descriptor.jdbc.InstantAsTimestampWithTimeZoneJdbcType; import org.hibernate.type.descriptor.jdbc.JdbcType; import org.hibernate.type.descriptor.jdbc.ObjectNullAsBinaryTypeJdbcType; import org.hibernate.type.descriptor.jdbc.UUIDJdbcType; @@ -107,9 +107,6 @@ import static org.hibernate.exception.spi.TemplatedViolatedConstraintNameExtractor.extractUsingTemplate; import static org.hibernate.query.sqm.TemporalUnit.DAY; import static org.hibernate.query.sqm.TemporalUnit.EPOCH; -import static org.hibernate.query.sqm.TemporalUnit.MONTH; -import static org.hibernate.query.sqm.TemporalUnit.QUARTER; -import static org.hibernate.query.sqm.TemporalUnit.YEAR; import static org.hibernate.type.SqlTypes.ARRAY; import static org.hibernate.type.SqlTypes.BINARY; import static org.hibernate.type.SqlTypes.BLOB; @@ -129,9 +126,11 @@ import static org.hibernate.type.SqlTypes.OTHER; import static org.hibernate.type.SqlTypes.SQLXML; import static org.hibernate.type.SqlTypes.STRUCT; +import static org.hibernate.type.SqlTypes.TIME; import static org.hibernate.type.SqlTypes.TIMESTAMP; import static org.hibernate.type.SqlTypes.TIMESTAMP_UTC; import static org.hibernate.type.SqlTypes.TIMESTAMP_WITH_TIMEZONE; +import static org.hibernate.type.SqlTypes.TIME_UTC; import static org.hibernate.type.SqlTypes.TINYINT; import static org.hibernate.type.SqlTypes.UUID; import static org.hibernate.type.SqlTypes.VARBINARY; @@ -149,9 +148,7 @@ */ public class PostgreSQLLegacyDialect extends Dialect { - private static final PostgreSQLIdentityColumnSupport IDENTITY_COLUMN_SUPPORT = new 
PostgreSQLIdentityColumnSupport(); - - private final PostgreSQLDriverKind driverKind; + protected final PostgreSQLDriverKind driverKind; private final UniqueDelegate uniqueDelegate = new CreateTableUniqueDelegate(this); public PostgreSQLLegacyDialect() { @@ -208,6 +205,10 @@ protected String columnType(int sqlTypeCode) { case LONG32VARBINARY: return "bytea"; + // We do not use the time with timezone type because PG deprecated it and it lacks certain operations like subtraction +// case TIME_UTC: +// return columnType( TIME_WITH_TIMEZONE ); + case TIMESTAMP_UTC: return columnType( TIMESTAMP_WITH_TIMEZONE ); @@ -253,21 +254,18 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR if ( getVersion().isSameOrAfter( 8, 2 ) ) { ddlTypeRegistry.addDescriptor( new DdlTypeImpl( UUID, "uuid", this ) ); } - if ( PostgreSQLPGObjectJdbcType.isUsable() ) { - // The following DDL types require that the PGobject class is usable/visible - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( INET, "inet", this ) ); - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "geometry", this ) ); - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOGRAPHY, "geography", this ) ); - ddlTypeRegistry.addDescriptor( new Scale6IntervalSecondDdlType( this ) ); - - if ( getVersion().isSameOrAfter( 9, 2 ) ) { - // Prefer jsonb if possible - if ( getVersion().isSameOrAfter( 9, 4 ) ) { - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "jsonb", this ) ); - } - else { - ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "json", this ) ); - } + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( INET, "inet", this ) ); + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "geometry", this ) ); + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOGRAPHY, "geography", this ) ); + ddlTypeRegistry.addDescriptor( new Scale6IntervalSecondDdlType( this ) ); + + if ( getVersion().isSameOrAfter( 9, 2 ) ) { + // Prefer jsonb if possible + if ( getVersion().isSameOrAfter( 
9, 4 ) ) { + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "jsonb", this ) ); + } + else { + ddlTypeRegistry.addDescriptor( new DdlTypeImpl( JSON, "json", this ) ); } } } @@ -325,6 +323,12 @@ public JdbcType resolveSqlTypeDescriptor( break; } break; + case TIME: + // The PostgreSQL JDBC driver reports TIME for timetz, but we use it only for mapping OffsetTime to UTC + if ( "timetz".equals( columnTypeName ) ) { + jdbcTypeCode = TIME_UTC; + } + break; case TIMESTAMP: // The PostgreSQL JDBC driver reports TIMESTAMP for timestamptz, but we use it only for mapping Instant if ( "timestamptz".equals( columnTypeName ) ) { @@ -445,36 +449,31 @@ public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalT if ( unit == null ) { return "(?3-?2)"; } - if ( toTemporalType != TemporalType.TIMESTAMP && fromTemporalType != TemporalType.TIMESTAMP && unit == DAY ) { + if ( toTemporalType == TemporalType.DATE && fromTemporalType == TemporalType.DATE ) { // special case: subtraction of two dates // results in an integer number of days // instead of an INTERVAL - return "(?3-?2)"; + switch ( unit ) { + case YEAR: + case MONTH: + case QUARTER: + return "extract(" + translateDurationField( unit ) + " from age(?3,?2))"; + default: + return "(?3-?2)" + DAY.conversionFactor( unit, this ); + } } else { - StringBuilder pattern = new StringBuilder(); switch ( unit ) { case YEAR: - extractField( pattern, YEAR, fromTemporalType, toTemporalType, unit ); - break; + return "extract(year from ?3-?2)"; case QUARTER: - pattern.append( "(" ); - extractField( pattern, YEAR, fromTemporalType, toTemporalType, unit ); - pattern.append( "+" ); - extractField( pattern, QUARTER, fromTemporalType, toTemporalType, unit ); - pattern.append( ")" ); - break; + return "(extract(year from ?3-?2)*4+extract(month from ?3-?2)/3)"; case MONTH: - pattern.append( "(" ); - extractField( pattern, YEAR, fromTemporalType, toTemporalType, unit ); - pattern.append( "+" ); - extractField( pattern, 
MONTH, fromTemporalType, toTemporalType, unit ); - pattern.append( ")" ); - break; + return "(extract(year from ?3-?2)*12+extract(month from ?3-?2))"; case WEEK: //week is not supported by extract() when the argument is a duration + return "(extract(day from ?3-?2)/7)"; case DAY: - extractField( pattern, DAY, fromTemporalType, toTemporalType, unit ); - break; + return "extract(day from ?3-?2)"; //in order to avoid multiple calls to extract(), //we use extract(epoch from x - y) * factor for //all the following units: @@ -483,15 +482,14 @@ public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalT case SECOND: case NANOSECOND: case NATIVE: - extractField( pattern, EPOCH, fromTemporalType, toTemporalType, unit ); - break; + return "extract(epoch from ?3-?2)" + EPOCH.conversionFactor( unit, this ); default: throw new SemanticException( "unrecognized field: " + unit ); } - return pattern.toString(); } } + @Deprecated protected void extractField( StringBuilder pattern, TemporalUnit unit, @@ -501,7 +499,7 @@ protected void extractField( pattern.append( "extract(" ); pattern.append( translateDurationField( unit ) ); pattern.append( " from " ); - if ( toTimestamp != TemporalType.TIMESTAMP && fromTimestamp != TemporalType.TIMESTAMP ) { + if ( toTimestamp == TemporalType.DATE && fromTimestamp == TemporalType.DATE ) { // special case subtraction of two // dates results in an integer not // an Interval @@ -539,8 +537,6 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio CommonFunctionFactory functionFactory = new CommonFunctionFactory(functionContributions); - functionFactory.round_roundFloor(); //Postgres round(x,n) does not accept double - functionFactory.trunc_truncFloor(); functionFactory.cot(); functionFactory.radians(); functionFactory.degrees(); @@ -569,7 +565,6 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.toCharNumberDateTimestamp(); 
functionFactory.concat_pipeOperator( "convert_from(lo_get(?1),pg_client_encoding())" ); functionFactory.localtimeLocaltimestamp(); - functionFactory.dateTrunc(); functionFactory.length_characterLength_pattern( "length(lo_get(?1),pg_client_encoding())" ); functionFactory.bitLength_pattern( "bit_length(?1)", "length(lo_get(?1))*8" ); functionFactory.octetLength_pattern( "octet_length(?1)", "length(lo_get(?1))" ); @@ -611,9 +606,11 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio "round", new PostgreSQLTruncRoundFunction( "round", true ) ); functionContributions.getFunctionRegistry().register( - "trunc", new PostgreSQLTruncRoundFunction( "trunc", true ) + "trunc", + new PostgreSQLTruncFunction( true, functionContributions.getTypeConfiguration() ) ); functionContributions.getFunctionRegistry().registerAlternateKey( "truncate", "trunc" ); + functionFactory.dateTrunc(); } /** @@ -1008,7 +1005,7 @@ public boolean qualifyIndexName() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return IDENTITY_COLUMN_SUPPORT; + return PostgreSQLIdentityColumnSupport.INSTANCE; } @Override @@ -1311,8 +1308,11 @@ public void augmentRecognizedTableTypes(List tableTypesList) { @Override public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { - super.contributeTypes(typeContributions, serviceRegistry); + super.contributeTypes( typeContributions, serviceRegistry ); + contributePostgreSQLTypes( typeContributions, serviceRegistry ); + } + protected void contributePostgreSQLTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration() .getJdbcTypeRegistry(); // For discussion of BLOB support in Postgres, as of 8.4, have a peek at @@ -1325,22 +1325,58 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry // dialect uses oid for Blobs, byte arrays cannot be used. 
jdbcTypeRegistry.addDescriptor( Types.BLOB, BlobJdbcType.BLOB_BINDING ); jdbcTypeRegistry.addDescriptor( Types.CLOB, ClobJdbcType.CLOB_BINDING ); - jdbcTypeRegistry.addDescriptor( TIMESTAMP_UTC, InstantAsTimestampWithTimeZoneJdbcType.INSTANCE ); + // Don't use this type due to https://github.com/pgjdbc/pgjdbc/issues/2862 + //jdbcTypeRegistry.addDescriptor( TimestampUtcAsOffsetDateTimeJdbcType.INSTANCE ); jdbcTypeRegistry.addDescriptor( XmlJdbcType.INSTANCE ); if ( driverKind == PostgreSQLDriverKind.PG_JDBC ) { - if ( PostgreSQLPGObjectJdbcType.isUsable() ) { - jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLInetJdbcType.INSTANCE ); - jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLIntervalSecondJdbcType.INSTANCE ); - jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLStructJdbcType.INSTANCE ); + if ( PgJdbcHelper.isUsable( serviceRegistry ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getInetJdbcType( serviceRegistry ) ); + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getIntervalJdbcType( serviceRegistry ) ); + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getStructJdbcType( serviceRegistry ) ); + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingInetJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingIntervalSecondJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLStructCastingJdbcType.INSTANCE ); } if ( getVersion().isSameOrAfter( 8, 2 ) ) { // HHH-9562 jdbcTypeRegistry.addDescriptorIfAbsent( UUIDJdbcType.INSTANCE ); if ( getVersion().isSameOrAfter( 9, 2 ) ) { - if ( PostgreSQLPGObjectJdbcType.isUsable() ) { - jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLJsonbJdbcType.INSTANCE ); + if ( getVersion().isSameOrAfter( 9, 4 ) ) { + if ( PgJdbcHelper.isUsable( serviceRegistry ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getJsonbJdbcType( serviceRegistry ) ); + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( 
PostgreSQLCastingJsonJdbcType.JSONB_INSTANCE ); + } + } + else { + if ( PgJdbcHelper.isUsable( serviceRegistry ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( PgJdbcHelper.getJsonJdbcType( serviceRegistry ) ); + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingJsonJdbcType.JSON_INSTANCE ); + } + } + } + } + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingInetJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingIntervalSecondJdbcType.INSTANCE ); + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLStructCastingJdbcType.INSTANCE ); + + if ( getVersion().isSameOrAfter( 8, 2 ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( UUIDJdbcType.INSTANCE ); + if ( getVersion().isSameOrAfter( 9, 2 ) ) { + if ( getVersion().isSameOrAfter( 9, 4 ) ) { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingJsonJdbcType.JSONB_INSTANCE ); + } + else { + jdbcTypeRegistry.addDescriptorIfAbsent( PostgreSQLCastingJsonJdbcType.JSON_INSTANCE ); } } } @@ -1376,23 +1412,13 @@ public boolean canBatchTruncate() { // disabled foreign key constraints still prevent 'truncate table' // (these would help if we used 'delete' instead of 'truncate') -// @Override -// public String getDisableConstraintsStatement() { -// return "set constraints all deferred"; -// } -// -// @Override -// public String getEnableConstraintsStatement() { -// return "set constraints all immediate"; -// } -// -// @Override -// public String getDisableConstraintStatement(String tableName, String name) { -// return "alter table " + tableName + " alter constraint " + name + " deferrable"; -// } -// -// @Override -// public String getEnableConstraintStatement(String tableName, String name) { -// return "alter table " + tableName + " alter constraint " + name + " deferrable"; -// } + @Override + public String rowId(String rowId) { + return "ctid"; + } + + @Override + public int rowIdSqlType() { + return OTHER; + } } diff --git 
a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgresPlusLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgresPlusLegacyDialect.java index ae23685a8dcd..4e0ce28492c3 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgresPlusLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/PostgresPlusLegacyDialect.java @@ -20,8 +20,6 @@ import jakarta.persistence.TemporalType; -import static org.hibernate.query.sqm.TemporalUnit.DAY; - /** * An SQL dialect for Postgres Plus * @@ -84,12 +82,10 @@ public String currentTimestamp() { @Override public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalType, TemporalType toTemporalType) { - if ( toTemporalType != TemporalType.TIMESTAMP && fromTemporalType != TemporalType.TIMESTAMP && unit == DAY ) { + if ( toTemporalType == TemporalType.DATE && fromTemporalType == TemporalType.DATE ) { // special case: subtraction of two dates results in an INTERVAL on Postgres Plus // because there is no date type i.e. 
without time for Oracle compatibility - final StringBuilder pattern = new StringBuilder(); - extractField( pattern, DAY, fromTemporalType, toTemporalType, unit ); - return pattern.toString(); + return super.timestampdiffPattern( unit, TemporalType.TIMESTAMP, TemporalType.TIMESTAMP ); } return super.timestampdiffPattern( unit, fromTemporalType, toTemporalType ); } diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/SQLServer2005Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServer2005Dialect.java similarity index 75% rename from hibernate-core/src/main/java/org/hibernate/dialect/SQLServer2005Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServer2005Dialect.java index 27d89fca8d1f..fe25766b5841 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/SQLServer2005Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServer2005Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * A dialect for Microsoft SQL Server 2005. 
@@ -15,7 +17,7 @@ * @deprecated use {@code SQLServerDialect(9)} */ @Deprecated -public class SQLServer2005Dialect extends SQLServerDialect { +public class SQLServer2005Dialect extends SQLServerLegacyDialect { public SQLServer2005Dialect() { super( DatabaseVersion.make( 9 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacyDialect.java index 29887150510e..1dbc9470d270 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacyDialect.java @@ -21,6 +21,7 @@ import org.hibernate.dialect.function.CommonFunctionFactory; import org.hibernate.dialect.function.CountFunction; import org.hibernate.dialect.function.SQLServerFormatEmulation; +import org.hibernate.dialect.function.SqlServerConvertTruncFunction; import org.hibernate.dialect.identity.IdentityColumnSupport; import org.hibernate.dialect.identity.SQLServerIdentityColumnSupport; import org.hibernate.dialect.pagination.LimitHandler; @@ -64,7 +65,8 @@ import org.hibernate.type.StandardBasicTypes; import org.hibernate.type.descriptor.java.PrimitiveByteArrayJavaType; import org.hibernate.type.descriptor.jdbc.JdbcType; -import org.hibernate.type.descriptor.jdbc.SmallIntJdbcType; +import org.hibernate.type.descriptor.jdbc.TimestampUtcAsJdbcTimestampJdbcType; +import org.hibernate.type.descriptor.jdbc.TinyIntAsSmallIntJdbcType; import org.hibernate.type.descriptor.jdbc.UUIDJdbcType; import org.hibernate.type.descriptor.jdbc.XmlJdbcType; import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; @@ -168,6 +170,7 @@ protected String columnType(int sqlTypeCode) { return getVersion().isSameOrAfter( 10 ) ? "time" : super.columnType( sqlTypeCode ); case TIMESTAMP: return getVersion().isSameOrAfter( 10 ) ? 
"datetime2($p)" : super.columnType( sqlTypeCode ); + case TIME_WITH_TIMEZONE: case TIMESTAMP_WITH_TIMEZONE: return getVersion().isSameOrAfter( 10 ) ? "datetimeoffset($p)" : super.columnType( sqlTypeCode ); } @@ -265,9 +268,12 @@ public int getMaxIdentifierLength() { public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) { super.contributeTypes( typeContributions, serviceRegistry ); + // Need to bind as java.sql.Timestamp because reading OffsetDateTime from a "datetime2" column fails + typeContributions.contributeJdbcType( TimestampUtcAsJdbcTimestampJdbcType.INSTANCE ); + typeContributions.getTypeConfiguration().getJdbcTypeRegistry().addDescriptor( Types.TINYINT, - SmallIntJdbcType.INSTANCE + TinyIntAsSmallIntJdbcType.INSTANCE ); typeContributions.contributeJdbcType( XmlJdbcType.INSTANCE ); typeContributions.contributeJdbcType( UUIDJdbcType.INSTANCE ); @@ -294,7 +300,8 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio "count_big", "+", "varchar(max)", - false + false, + "varbinary(max)" ) ); @@ -303,7 +310,6 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.log_log(); - functionFactory.trunc_round(); functionFactory.round_round(); functionFactory.everyAny_minMaxIif(); functionFactory.octetLength_pattern( "datalength(?1)" ); @@ -366,6 +372,15 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio } if ( getVersion().isSameOrAfter( 16 ) ) { functionFactory.leastGreatest(); + functionFactory.dateTrunc_datetrunc(); + functionFactory.trunc_round_datetrunc(); + } + else { + functionContributions.getFunctionRegistry().register( + "trunc", + new SqlServerConvertTruncFunction( functionContributions.getTypeConfiguration() ) + ); + functionContributions.getFunctionRegistry().registerAlternateKey( "truncate", "trunc" ); } } @@ -609,7 +624,7 @@ public int getInExpressionCountLimit() { @Override public IdentityColumnSupport 
getIdentityColumnSupport() { - return new SQLServerIdentityColumnSupport(); + return SQLServerIdentityColumnSupport.INSTANCE; } @Override @@ -771,6 +786,8 @@ public String extractPattern(TemporalUnit unit) { case SECOND: //this should evaluate to a floating point type return "(datepart(second,?2)+datepart(nanosecond,?2)/1e9)"; + case EPOCH: + return "datediff_big(second, '1970-01-01', ?2)"; case WEEK: // Thanks https://www.sqlservercentral.com/articles/a-simple-formula-to-calculate-the-iso-week-number if ( getVersion().isBefore( 10 ) ) { diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacySqlAstTranslator.java index 04406ec802ad..fe2d2d5cd582 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLServerLegacySqlAstTranslator.java @@ -124,7 +124,7 @@ protected boolean renderNamedTableReference(NamedTableReference tableReference, appendSql( UNION_ALL ); searchIndex = unionIndex + UNION_ALL.length(); } - append( tableExpression, searchIndex, tableExpression.length() - 2 ); + append( tableExpression, searchIndex, tableExpression.length() - 1 ); renderLockHint( lockMode ); appendSql( " )" ); @@ -323,6 +323,12 @@ protected void visitSqlSelections(SelectClause selectClause) { else if ( offsetFetchClauseMode == OffsetFetchClauseMode.EMULATED ) { renderTopClause( querySpec, isRowsOnlyFetchClauseType( querySpec ), true ); } + else if ( getQueryPartStack().depth() > 1 && querySpec.hasSortSpecifications() + && getQueryPartStack().peek( 1 ) instanceof QueryGroup ) { + // If the current query spec has a query group parent, no offset/fetch clause, but an order by clause, + // then we must render "top 100 percent" as that is needed for the SQL to be valid + appendSql( 
"top 100 percent " ); + } super.visitSqlSelections( selectClause ); } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLiteDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLiteDialect.java index f56d9e351f2e..f1d8c8f92cd1 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLiteDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SQLiteDialect.java @@ -94,8 +94,6 @@ */ public class SQLiteDialect extends Dialect { - private static final SQLiteIdentityColumnSupport IDENTITY_COLUMN_SUPPORT = new SQLiteIdentityColumnSupport(); - private final UniqueDelegate uniqueDelegate; public SQLiteDialect(DialectResolutionInfo info) { @@ -545,7 +543,7 @@ public boolean supportsWindowFunctions() { @Override public IdentityColumnSupport getIdentityColumnSupport() { - return IDENTITY_COLUMN_SUPPORT; + return SQLiteIdentityColumnSupport.INSTANCE; } @Override diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/Sybase11Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Sybase11Dialect.java similarity index 66% rename from hibernate-core/src/main/java/org/hibernate/dialect/Sybase11Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Sybase11Dialect.java index 8f913786cb10..35b0733d8825 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/Sybase11Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/Sybase11Dialect.java @@ -4,17 +4,19 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . 
*/ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * A SQL dialect suitable for use with Sybase 11.9.2 * (specifically: avoids ANSI JOIN syntax) * * @author Colm O' Flaherty - * @deprecated use {@link SybaseASEDialect} instead + * @deprecated use {@code SybaseASELegacyDialect( DatabaseVersion.make( 11 ) )} */ @Deprecated -public class Sybase11Dialect extends SybaseASEDialect { +public class Sybase11Dialect extends SybaseASELegacyDialect { public Sybase11Dialect() { super( DatabaseVersion.make( 11 ) ); } diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/SybaseASE157Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASE157Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/SybaseASE157Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASE157Dialect.java index 6c8e152b4fd3..c2f993d0392a 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/SybaseASE157Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASE157Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect targeting Sybase Adaptive Server Enterprise (ASE) 15.7 and higher. 
@@ -14,7 +16,7 @@ * @deprecated use {@code SybaseASEDialect(1570)} */ @Deprecated -public class SybaseASE157Dialect extends SybaseASEDialect { +public class SybaseASE157Dialect extends SybaseASELegacyDialect { public SybaseASE157Dialect() { super( DatabaseVersion.make( 15, 7 ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/dialect/SybaseASE15Dialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASE15Dialect.java similarity index 76% rename from hibernate-core/src/main/java/org/hibernate/dialect/SybaseASE15Dialect.java rename to hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASE15Dialect.java index 23091d7683d8..dde0a36658c6 100644 --- a/hibernate-core/src/main/java/org/hibernate/dialect/SybaseASE15Dialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASE15Dialect.java @@ -4,7 +4,9 @@ * License: GNU Lesser General Public License (LGPL), version 2.1 or later. * See the lgpl.txt file in the root directory or . */ -package org.hibernate.dialect; +package org.hibernate.community.dialect; + +import org.hibernate.dialect.DatabaseVersion; /** * An SQL dialect targeting Sybase Adaptive Server Enterprise (ASE) 15 and higher. 
@@ -14,7 +16,7 @@ * @deprecated use {@code SybaseASEDialect(1500)} */ @Deprecated -public class SybaseASE15Dialect extends SybaseASEDialect { +public class SybaseASE15Dialect extends SybaseASELegacyDialect { public SybaseASE15Dialect() { super( DatabaseVersion.make( 15 ) ); diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacyDialect.java index 45f3c7cedd6d..bf1b99965c06 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacyDialect.java @@ -12,11 +12,13 @@ import java.sql.Types; import java.util.Map; +import org.hibernate.LockMode; import org.hibernate.LockOptions; import org.hibernate.boot.model.TypeContributions; import org.hibernate.dialect.DatabaseVersion; import org.hibernate.dialect.Dialect; import org.hibernate.dialect.RowLockStrategy; +import org.hibernate.dialect.SybaseDriverKind; import org.hibernate.dialect.pagination.LimitHandler; import org.hibernate.dialect.pagination.TopLimitHandler; import org.hibernate.engine.jdbc.Size; @@ -123,7 +125,7 @@ protected void registerColumnTypes(TypeContributions typeContributions, ServiceR // According to Wikipedia bigdatetime and bigtime were added in 15.5 // But with jTDS we can't use them as the driver can't handle the types - if ( getVersion().isSameOrAfter( 15, 5 ) && !jtdsDriver ) { + if ( getVersion().isSameOrAfter( 15, 5 ) && getDriverKind() != SybaseDriverKind.JTDS ) { ddlTypeRegistry.addDescriptor( CapacityDependentDdlType.builder( DATE, "bigdatetime", "bigdatetime", this ) .withTypeCapacity( 3, "datetime" ) @@ -230,7 +232,7 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry .getJdbcTypeRegistry(); jdbcTypeRegistry.addDescriptor( Types.BOOLEAN, TinyIntJdbcType.INSTANCE 
); // At least the jTDS driver does not support this type code - if ( jtdsDriver ) { + if ( getDriverKind() == SybaseDriverKind.JTDS ) { jdbcTypeRegistry.addDescriptor( Types.TIMESTAMP_WITH_TIMEZONE, TimestampJdbcType.INSTANCE ); } } @@ -589,34 +591,17 @@ public boolean supportsLobValueChangePropagation() { } @Override - public RowLockStrategy getWriteRowLockStrategy() { - return getVersion().isSameOrAfter( 15, 7 ) ? RowLockStrategy.COLUMN : RowLockStrategy.TABLE; - } - - @Override - public String getForUpdateString() { - return getVersion().isBefore( 15, 7 ) ? "" : " for update"; - } - - @Override - public String getForUpdateString(String aliases) { - return getVersion().isBefore( 15, 7 ) - ? "" - : getForUpdateString() + " of " + aliases; + public boolean supportsSkipLocked() { + // It does support skipping locked rows only for READ locking + return false; } @Override public String appendLockHint(LockOptions mode, String tableName) { - //TODO: is this really necessary??! - return getVersion().isBefore( 15, 7 ) ? super.appendLockHint( mode, tableName ) : tableName; - } - - @Override - public String applyLocksToSql(String sql, LockOptions aliasedLockOptions, Map keyColumnNames) { - //TODO: is this really correct? - return getVersion().isBefore( 15, 7 ) - ? super.applyLocksToSql( sql, aliasedLockOptions, keyColumnNames ) - : sql + new ForUpdateFragment( this, aliasedLockOptions, keyColumnNames ).toFragmentString(); + final String lockHint = super.appendLockHint( mode, tableName ); + return !mode.getLockMode().greaterThan( LockMode.READ ) && mode.getTimeOut() == LockOptions.SKIP_LOCKED + ? 
lockHint + " readpast" + : lockHint; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacySqlAstTranslator.java index 6cd8e2c98e76..17b1e501f7a4 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseASELegacySqlAstTranslator.java @@ -10,8 +10,10 @@ import java.util.function.Consumer; import org.hibernate.LockMode; +import org.hibernate.LockOptions; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.query.sqm.ComparisonOperator; +import org.hibernate.sql.ast.Clause; import org.hibernate.sql.ast.SqlAstJoinType; import org.hibernate.sql.ast.SqlAstNodeRenderingMode; import org.hibernate.sql.ast.spi.AbstractSqlAstTranslator; @@ -46,6 +48,8 @@ */ public class SybaseASELegacySqlAstTranslator extends AbstractSqlAstTranslator { + private static final String UNION_ALL = " union all "; + public SybaseASELegacySqlAstTranslator(SessionFactoryImplementor sessionFactory, Statement statement) { super( sessionFactory, statement ); } @@ -109,14 +113,64 @@ protected void visitAnsiCaseSimpleExpression( @Override protected boolean renderNamedTableReference(NamedTableReference tableReference, LockMode lockMode) { - super.renderNamedTableReference( tableReference, lockMode ); - if ( getDialect().getVersion().isBefore( 15, 7 ) ) { - if ( LockMode.READ.lessThan( lockMode ) ) { + final String tableExpression = tableReference.getTableExpression(); + if ( tableReference instanceof UnionTableReference && lockMode != LockMode.NONE && tableExpression.charAt( 0 ) == '(' ) { + // SQL Server requires to push down the lock hint to the actual table names + int searchIndex = 0; + int unionIndex; + while ( ( unionIndex = tableExpression.indexOf( 
UNION_ALL, searchIndex ) ) != -1 ) { + append( tableExpression, searchIndex, unionIndex ); + renderLockHint( lockMode ); + appendSql( UNION_ALL ); + searchIndex = unionIndex + UNION_ALL.length(); + } + append( tableExpression, searchIndex, tableExpression.length() - 1 ); + renderLockHint( lockMode ); + appendSql( " )" ); + + registerAffectedTable( tableReference ); + final Clause currentClause = getClauseStack().getCurrent(); + if ( rendersTableReferenceAlias( currentClause ) ) { + final String identificationVariable = tableReference.getIdentificationVariable(); + if ( identificationVariable != null ) { + appendSql( ' ' ); + appendSql( identificationVariable ); + } + } + } + else { + super.renderNamedTableReference( tableReference, lockMode ); + renderLockHint( lockMode ); + } + // Just always return true because SQL Server doesn't support the FOR UPDATE clause + return true; + } + + private void renderLockHint(LockMode lockMode) { + final int effectiveLockTimeout = getEffectiveLockTimeout( lockMode ); + switch ( lockMode ) { + case PESSIMISTIC_READ: + case PESSIMISTIC_WRITE: + case WRITE: { + switch ( effectiveLockTimeout ) { + case LockOptions.SKIP_LOCKED: + appendSql( " holdlock readpast" ); + break; + default: + appendSql( " holdlock" ); + break; + } + break; + } + case UPGRADE_SKIPLOCKED: { + appendSql( " holdlock readpast" ); + break; + } + case UPGRADE_NOWAIT: { appendSql( " holdlock" ); + break; } - return true; } - return false; } @Override @@ -152,10 +206,7 @@ protected void renderTableGroupJoin(TableGroupJoin tableGroupJoin, List extends AbstractSqlAstTranslator { + private static final String UNION_ALL = " union all "; + public SybaseAnywhereSqlAstTranslator(SessionFactoryImplementor sessionFactory, Statement statement) { super( sessionFactory, statement ); } @@ -95,16 +99,48 @@ protected void visitAnsiCaseSimpleExpression( @Override protected boolean renderNamedTableReference(NamedTableReference tableReference, LockMode lockMode) { - 
super.renderNamedTableReference( tableReference, lockMode ); if ( getDialect().getVersion().isBefore( 10 ) ) { - if ( LockMode.READ.lessThan( lockMode ) ) { - appendSql( " holdlock" ); + final String tableExpression = tableReference.getTableExpression(); + if ( tableReference instanceof UnionTableReference && lockMode != LockMode.NONE && tableExpression.charAt( 0 ) == '(' ) { + // SQL Server requires to push down the lock hint to the actual table names + int searchIndex = 0; + int unionIndex; + while ( ( unionIndex = tableExpression.indexOf( UNION_ALL, searchIndex ) ) != -1 ) { + append( tableExpression, searchIndex, unionIndex ); + renderLockHint( lockMode ); + appendSql( UNION_ALL ); + searchIndex = unionIndex + UNION_ALL.length(); + } + append( tableExpression, searchIndex, tableExpression.length() - 1 ); + renderLockHint( lockMode ); + appendSql( " )" ); + + registerAffectedTable( tableReference ); + final Clause currentClause = getClauseStack().getCurrent(); + if ( rendersTableReferenceAlias( currentClause ) ) { + final String identificationVariable = tableReference.getIdentificationVariable(); + if ( identificationVariable != null ) { + appendSql( ' ' ); + appendSql( identificationVariable ); + } + } + } + else { + super.renderNamedTableReference( tableReference, lockMode ); + renderLockHint( lockMode ); } + // Just always return true because SQL Server doesn't support the FOR UPDATE clause return true; } return false; } + private void renderLockHint(LockMode lockMode) { + if ( LockMode.READ.lessThan( lockMode ) ) { + appendSql( " holdlock" ); + } + } + @Override protected void renderForUpdateClause(QuerySpec querySpec, ForUpdateClause forUpdateClause) { if ( getDialect().getVersion().isBefore( 10 ) ) { diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacyDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacyDialect.java index 1610d34c82e8..2b35a5c10eb5 100644 --- 
a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacyDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacyDialect.java @@ -9,15 +9,21 @@ import java.sql.DatabaseMetaData; import java.sql.SQLException; import java.sql.Types; +import java.time.temporal.TemporalAccessor; +import java.util.Calendar; +import java.util.Date; +import java.util.TimeZone; import org.hibernate.boot.model.FunctionContributions; import org.hibernate.boot.model.TypeContributions; import org.hibernate.dialect.AbstractTransactSQLDialect; import org.hibernate.dialect.DatabaseVersion; import org.hibernate.dialect.NationalizationSupport; +import org.hibernate.dialect.SybaseDriverKind; import org.hibernate.dialect.function.CommonFunctionFactory; import org.hibernate.dialect.function.CountFunction; import org.hibernate.dialect.function.IntegralTimestampaddFunction; +import org.hibernate.dialect.function.SybaseTruncFunction; import org.hibernate.dialect.unique.SkipNullableUniqueDelegate; import org.hibernate.dialect.unique.UniqueDelegate; import org.hibernate.engine.jdbc.dialect.spi.DialectResolutionInfo; @@ -28,6 +34,7 @@ import org.hibernate.engine.spi.LoadQueryInfluencers; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.procedure.internal.JTDSCallableStatementSupport; +import org.hibernate.procedure.internal.SybaseCallableStatementSupport; import org.hibernate.procedure.spi.CallableStatementSupport; import org.hibernate.query.spi.QueryOptions; import org.hibernate.query.spi.QueryParameterBindings; @@ -51,15 +58,21 @@ import org.hibernate.sql.ast.tree.select.SelectStatement; import org.hibernate.sql.exec.spi.JdbcOperation; import org.hibernate.type.JavaObjectType; +import org.hibernate.type.NullType; import org.hibernate.type.descriptor.jdbc.BlobJdbcType; import org.hibernate.type.descriptor.jdbc.ClobJdbcType; import org.hibernate.type.descriptor.jdbc.JdbcType; -import 
org.hibernate.type.descriptor.jdbc.ObjectNullAsNullTypeJdbcType; -import org.hibernate.type.descriptor.jdbc.SmallIntJdbcType; +import org.hibernate.type.descriptor.jdbc.ObjectNullAsBinaryTypeJdbcType; +import org.hibernate.type.descriptor.jdbc.TinyIntAsSmallIntJdbcType; import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; import jakarta.persistence.TemporalType; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsDate; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsLocalTime; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTime; +import static org.hibernate.type.descriptor.DateTimeUtils.appendAsTimestampWithMillis; + /** * Superclass for all Sybase dialects. @@ -68,11 +81,13 @@ */ public class SybaseLegacyDialect extends AbstractTransactSQLDialect { - protected final boolean jtdsDriver; - //All Sybase dialects share an IN list size limit. private static final int PARAM_LIST_SIZE_LIMIT = 250000; private final UniqueDelegate uniqueDelegate = new SkipNullableUniqueDelegate(this); + private final SybaseDriverKind driverKind; + + @Deprecated(forRemoval = true) + protected final boolean jtdsDriver; public SybaseLegacyDialect() { this( DatabaseVersion.make( 11, 0 ) ); @@ -80,13 +95,18 @@ public SybaseLegacyDialect() { public SybaseLegacyDialect(DatabaseVersion version) { super(version); - jtdsDriver = true; + this.driverKind = SybaseDriverKind.OTHER; + this.jtdsDriver = true; } public SybaseLegacyDialect(DialectResolutionInfo info) { super(info); - jtdsDriver = info.getDriverName() != null - && info.getDriverName().contains( "jTDS" ); + this.driverKind = SybaseDriverKind.determineKind( info ); + this.jtdsDriver = driverKind == SybaseDriverKind.JTDS; + } + + public SybaseDriverKind getDriverKind() { + return driverKind; } @Override @@ -102,10 +122,6 @@ public JdbcType resolveSqlTypeDescriptor( if ( precision == 19 && scale == 0 ) { return jdbcTypeRegistry.getDescriptor( Types.BIGINT ); } - case Types.TINYINT: 
- if ( jtdsDriver ) { - return jdbcTypeRegistry.getDescriptor( Types.SMALLINT ); - } } return super.resolveSqlTypeDescriptor( columnTypeName, @@ -167,8 +183,8 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry super.contributeTypes(typeContributions, serviceRegistry); final JdbcTypeRegistry jdbcTypeRegistry = typeContributions.getTypeConfiguration() .getJdbcTypeRegistry(); - if ( jtdsDriver ) { - jdbcTypeRegistry.addDescriptor( Types.TINYINT, SmallIntJdbcType.INSTANCE ); + if ( driverKind == SybaseDriverKind.JTDS ) { + jdbcTypeRegistry.addDescriptor( Types.TINYINT, TinyIntAsSmallIntJdbcType.INSTANCE ); // The jTDS driver doesn't support the JDBC4 signatures using 'long length' for stream bindings jdbcTypeRegistry.addDescriptor( Types.CLOB, ClobJdbcType.CLOB_BINDING ); @@ -185,12 +201,20 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry jdbcTypeRegistry.addDescriptor( Types.BLOB, BlobJdbcType.PRIMITIVE_ARRAY_BINDING ); // Sybase requires a custom binder for binding untyped nulls with the NULL type - typeContributions.contributeJdbcType( ObjectNullAsNullTypeJdbcType.INSTANCE ); + typeContributions.contributeJdbcType( ObjectNullAsBinaryTypeJdbcType.INSTANCE ); // Until we remove StandardBasicTypes, we have to keep this typeContributions.contributeType( new JavaObjectType( - ObjectNullAsNullTypeJdbcType.INSTANCE, + ObjectNullAsBinaryTypeJdbcType.INSTANCE, + typeContributions.getTypeConfiguration() + .getJavaTypeRegistry() + .getDescriptor( Object.class ) + ) + ); + typeContributions.contributeType( + new NullType( + ObjectNullAsBinaryTypeJdbcType.INSTANCE, typeContributions.getTypeConfiguration() .getJavaTypeRegistry() .getDescriptor( Object.class ) @@ -201,7 +225,7 @@ public void contributeTypes(TypeContributions typeContributions, ServiceRegistry @Override public NationalizationSupport getNationalizationSupport() { // At least the jTDS driver doesn't support this - return jtdsDriver ? 
NationalizationSupport.IMPLICIT : super.getNationalizationSupport(); + return driverKind == SybaseDriverKind.JTDS ? NationalizationSupport.IMPLICIT : super.getNationalizationSupport(); } @Override @@ -216,7 +240,6 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionFactory.varPopSamp_varp(); functionFactory.stddevPopSamp(); functionFactory.varPopSamp(); - functionFactory.trunc_floorPower(); functionFactory.round_round(); // For SQL-Server we need to cast certain arguments to varchar(16384) to be able to concat them @@ -248,6 +271,11 @@ public void initializeFunctionRegistry(FunctionContributions functionContributio functionContributions.getFunctionRegistry().register( "timestampadd", new IntegralTimestampaddFunction( this, functionContributions.getTypeConfiguration() ) ); + functionContributions.getFunctionRegistry().register( + "trunc", + new SybaseTruncFunction( functionContributions.getTypeConfiguration() ) + ); + functionContributions.getFunctionRegistry().registerAlternateKey( "truncate", "trunc" ); } @Override @@ -276,16 +304,95 @@ public String castPattern(CastType from, CastType to) { if ( to == CastType.STRING ) { switch ( from ) { case DATE: - return "str_replace(convert(varchar,?1,102),'.','-')"; + return "substring(convert(varchar,?1,23),1,10)"; case TIME: - return "convert(varchar,?1,108)"; + return "convert(varchar,?1,8)"; case TIMESTAMP: - return "str_replace(convert(varchar,?1,23),'T',' ')"; + return "convert(varchar,?1,140)"; } } return super.castPattern( from, to ); } + /* Something odd is going on with the jConnect driver when using JDBC escape syntax, so let's use native functions */ + + @Override + public void appendDateTimeLiteral( + SqlAppender appender, + TemporalAccessor temporalAccessor, + TemporalType precision, + TimeZone jdbcTimeZone) { + switch ( precision ) { + case DATE: + appender.appendSql( "convert(date,'" ); + appendAsDate( appender, temporalAccessor ); + appender.appendSql( "',140)" ); + 
break; + case TIME: + appender.appendSql( "convert(time,'" ); + appendAsTime( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone ); + appender.appendSql( "',8)" ); + break; + case TIMESTAMP: + appender.appendSql( "convert(datetime,'" ); + appendAsTimestampWithMillis( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone ); + appender.appendSql( "',140)" ); + break; + default: + throw new IllegalArgumentException(); + } + } + + @Override + public void appendDateTimeLiteral(SqlAppender appender, Date date, TemporalType precision, TimeZone jdbcTimeZone) { + switch ( precision ) { + case DATE: + appender.appendSql( "convert(date,'" ); + appendAsDate( appender, date ); + appender.appendSql( "',140)" ); + break; + case TIME: + appender.appendSql( "convert(time,'" ); + appendAsLocalTime( appender, date ); + appender.appendSql( "',8)" ); + break; + case TIMESTAMP: + appender.appendSql( "convert(datetime,'" ); + appendAsTimestampWithMillis( appender, date, jdbcTimeZone ); + appender.appendSql( "',140)" ); + break; + default: + throw new IllegalArgumentException(); + } + } + + @Override + public void appendDateTimeLiteral( + SqlAppender appender, + Calendar calendar, + TemporalType precision, + TimeZone jdbcTimeZone) { + switch ( precision ) { + case DATE: + appender.appendSql( "convert(date,'" ); + appendAsDate( appender, calendar ); + appender.appendSql( "',140)" ); + break; + case TIME: + appender.appendSql( "convert(time,'" ); + appendAsLocalTime( appender, calendar ); + appender.appendSql( "',8)" ); + break; + case TIMESTAMP: + appender.appendSql( "convert(datetime,'" ); + appendAsTimestampWithMillis( appender, calendar, jdbcTimeZone ); + appender.appendSql( "',140)" ); + break; + default: + throw new IllegalArgumentException(); + } + } + @Override public String translateExtractField(TemporalUnit unit) { switch ( unit ) { @@ -346,9 +453,8 @@ public IdentifierHelper buildIdentifierHelper(IdentifierHelperBuilder builder, D 
@Override public NameQualifierSupport getNameQualifierSupport() { - if ( getVersion().isSameOrAfter( 15 ) ) { - return NameQualifierSupport.BOTH; - } + // No support for schemas: https://userapps.support.sap.com/sap/support/knowledge/en/2591730 + // Authorization schemas seem to be something different: https://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.infocenter.dc36272.1550/html/commands/X48762.htm return NameQualifierSupport.CATALOG; } @@ -359,6 +465,12 @@ public UniqueDelegate getUniqueDelegate() { @Override public CallableStatementSupport getCallableStatementSupport() { - return jtdsDriver ? JTDSCallableStatementSupport.INSTANCE : super.getCallableStatementSupport(); + return driverKind == SybaseDriverKind.JTDS ? JTDSCallableStatementSupport.INSTANCE : SybaseCallableStatementSupport.INSTANCE; + } + + @Override + public boolean supportsNamedParameters(DatabaseMetaData databaseMetaData) throws SQLException { + // Only the jTDS driver supports named parameters properly + return driverKind == SybaseDriverKind.JTDS && super.supportsNamedParameters( databaseMetaData ); } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacySqlAstTranslator.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacySqlAstTranslator.java index c4424d5120f5..faaec68593e4 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacySqlAstTranslator.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/SybaseLegacySqlAstTranslator.java @@ -12,6 +12,7 @@ import org.hibernate.LockMode; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.query.sqm.ComparisonOperator; +import org.hibernate.sql.ast.Clause; import org.hibernate.sql.ast.SqlAstNodeRenderingMode; import org.hibernate.sql.ast.spi.AbstractSqlAstTranslator; import org.hibernate.sql.ast.spi.SqlSelection; @@ -25,6 +26,7 @@ import 
org.hibernate.sql.ast.tree.expression.SqlTuple; import org.hibernate.sql.ast.tree.expression.Summarization; import org.hibernate.sql.ast.tree.from.NamedTableReference; +import org.hibernate.sql.ast.tree.from.UnionTableReference; import org.hibernate.sql.ast.tree.select.QueryPart; import org.hibernate.sql.ast.tree.select.QuerySpec; import org.hibernate.sql.exec.spi.JdbcOperation; @@ -36,6 +38,8 @@ */ public class SybaseLegacySqlAstTranslator extends AbstractSqlAstTranslator { + private static final String UNION_ALL = " union all "; + public SybaseLegacySqlAstTranslator(SessionFactoryImplementor sessionFactory, Statement statement) { super( sessionFactory, statement ); } @@ -99,11 +103,43 @@ protected void visitAnsiCaseSimpleExpression( @Override protected boolean renderNamedTableReference(NamedTableReference tableReference, LockMode lockMode) { - super.renderNamedTableReference( tableReference, lockMode ); + final String tableExpression = tableReference.getTableExpression(); + if ( tableReference instanceof UnionTableReference && lockMode != LockMode.NONE && tableExpression.charAt( 0 ) == '(' ) { + // SQL Server requires to push down the lock hint to the actual table names + int searchIndex = 0; + int unionIndex; + while ( ( unionIndex = tableExpression.indexOf( UNION_ALL, searchIndex ) ) != -1 ) { + append( tableExpression, searchIndex, unionIndex ); + renderLockHint( lockMode ); + appendSql( UNION_ALL ); + searchIndex = unionIndex + UNION_ALL.length(); + } + append( tableExpression, searchIndex, tableExpression.length() - 1 ); + renderLockHint( lockMode ); + appendSql( " )" ); + + registerAffectedTable( tableReference ); + final Clause currentClause = getClauseStack().getCurrent(); + if ( rendersTableReferenceAlias( currentClause ) ) { + final String identificationVariable = tableReference.getIdentificationVariable(); + if ( identificationVariable != null ) { + appendSql( ' ' ); + appendSql( identificationVariable ); + } + } + } + else { + 
super.renderNamedTableReference( tableReference, lockMode ); + renderLockHint( lockMode ); + } + // Just always return true because SQL Server doesn't support the FOR UPDATE clause + return true; + } + + private void renderLockHint(LockMode lockMode) { if ( LockMode.READ.lessThan( lockMode ) ) { appendSql( " holdlock" ); } - return true; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TeradataDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TeradataDialect.java index fe895548d4e3..deabc41ba7a8 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TeradataDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TeradataDialect.java @@ -618,7 +618,7 @@ public String[] getSqlCreateStrings(Index index, Metadata metadata, SqlStringGen public IdentityColumnSupport getIdentityColumnSupport() { return getVersion().isBefore( 14 ) ? super.getIdentityColumnSupport() - : new Teradata14IdentityColumnSupport(); + : Teradata14IdentityColumnSupport.INSTANCE; } @Override diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TimesTenDialect.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TimesTenDialect.java index 503db268c62f..aa30500db527 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TimesTenDialect.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/TimesTenDialect.java @@ -290,7 +290,7 @@ private String withTimeout(String lockString, int timeout) { case LockOptions.WAIT_FOREVER: return lockString; default: - return supportsWait() ? lockString + " wait " + Math.round( timeout / 1e3f ) : lockString; + return supportsWait() ? 
lockString + " wait " + getTimeoutInSeconds( timeout ) : lockString; } } diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CUBRIDIdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CUBRIDIdentityColumnSupport.java index 7f6e46ecdb32..0a87e1b8468a 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CUBRIDIdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CUBRIDIdentityColumnSupport.java @@ -12,6 +12,9 @@ * @author Andrea Boriero */ public class CUBRIDIdentityColumnSupport extends IdentityColumnSupportImpl { + + public static final CUBRIDIdentityColumnSupport INSTANCE = new CUBRIDIdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return true; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CacheIdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CacheIdentityColumnSupport.java index 182bea072e04..de04c23f31c8 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CacheIdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/CacheIdentityColumnSupport.java @@ -13,6 +13,9 @@ * @author Andrea Boriero */ public class CacheIdentityColumnSupport extends IdentityColumnSupportImpl { + + public static final CacheIdentityColumnSupport INSTANCE = new CacheIdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return true; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/FirebirdIdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/FirebirdIdentityColumnSupport.java index fb95e544b504..68f1ac657ad8 
100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/FirebirdIdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/FirebirdIdentityColumnSupport.java @@ -13,6 +13,8 @@ */ public class FirebirdIdentityColumnSupport extends IdentityColumnSupportImpl { + public static final FirebirdIdentityColumnSupport INSTANCE = new FirebirdIdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return true; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/InformixIdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/InformixIdentityColumnSupport.java index cf82523a549b..bb61ed9d2fe4 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/InformixIdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/InformixIdentityColumnSupport.java @@ -15,6 +15,9 @@ * @author Andrea Boriero */ public class InformixIdentityColumnSupport extends IdentityColumnSupportImpl { + + public static final InformixIdentityColumnSupport INSTANCE = new InformixIdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return true; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres10IdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres10IdentityColumnSupport.java index b40ca3fb6ac9..dbbbba18f20a 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres10IdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres10IdentityColumnSupport.java @@ -10,6 +10,9 @@ * @author Andrea Boriero */ public class Ingres10IdentityColumnSupport 
extends Ingres9IdentityColumnSupport { + + public static final Ingres10IdentityColumnSupport INSTANCE = new Ingres10IdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return true; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres9IdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres9IdentityColumnSupport.java index 733a03583b2d..91310d3f70c7 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres9IdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Ingres9IdentityColumnSupport.java @@ -12,6 +12,9 @@ * @author Andrea Boriero */ public class Ingres9IdentityColumnSupport extends IdentityColumnSupportImpl { + + public static final Ingres9IdentityColumnSupport INSTANCE = new Ingres9IdentityColumnSupport(); + @Override public String getIdentitySelectString(String table, String column, int type) { return "select last_identity()"; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/MimerSQLIdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/MimerSQLIdentityColumnSupport.java index ff01b15c01b6..b65c12b26605 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/MimerSQLIdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/MimerSQLIdentityColumnSupport.java @@ -12,6 +12,9 @@ * @author Andrea Boriero */ public class MimerSQLIdentityColumnSupport extends IdentityColumnSupportImpl { + + public static final MimerSQLIdentityColumnSupport INSTANCE = new MimerSQLIdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return false; diff --git 
a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SQLiteIdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SQLiteIdentityColumnSupport.java index 0bdc00b97cec..e30170023826 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SQLiteIdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SQLiteIdentityColumnSupport.java @@ -16,6 +16,8 @@ */ public class SQLiteIdentityColumnSupport extends IdentityColumnSupportImpl { + public static final SQLiteIdentityColumnSupport INSTANCE = new SQLiteIdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return true; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SybaseAnywhereIdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SybaseAnywhereIdentityColumnSupport.java index bec0dc93c832..a06442a5165c 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SybaseAnywhereIdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/SybaseAnywhereIdentityColumnSupport.java @@ -12,6 +12,9 @@ * @author Andrea Boriero */ public class SybaseAnywhereIdentityColumnSupport extends AbstractTransactSQLIdentityColumnSupport { + + public static final SybaseAnywhereIdentityColumnSupport INSTANCE = new SybaseAnywhereIdentityColumnSupport(); + @Override public boolean supportsInsertSelectIdentity() { return false; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Teradata14IdentityColumnSupport.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Teradata14IdentityColumnSupport.java index cd417f4aa3c1..252831dad055 100644 --- 
a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Teradata14IdentityColumnSupport.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/identity/Teradata14IdentityColumnSupport.java @@ -12,6 +12,9 @@ * @author Andrea Boriero */ public class Teradata14IdentityColumnSupport extends IdentityColumnSupportImpl { + + public static Teradata14IdentityColumnSupport INSTANCE = new Teradata14IdentityColumnSupport(); + @Override public boolean supportsIdentityColumns() { return true; diff --git a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/pagination/SkipFirstLimitHandler.java b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/pagination/SkipFirstLimitHandler.java index 819b7e98f1f1..cf0fca2e9d38 100644 --- a/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/pagination/SkipFirstLimitHandler.java +++ b/hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/pagination/SkipFirstLimitHandler.java @@ -55,7 +55,7 @@ public String processSql(String sql, Limit limit) { } } - return insertAfterSelect( sql, skipFirst.toString() ); + return insertAfterSelect( skipFirst.toString(), sql ); } @Override diff --git a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectFactoryTest.java b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectFactoryTest.java index 1772f28c5a07..8422e8acfef7 100644 --- a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectFactoryTest.java +++ b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectFactoryTest.java @@ -19,6 +19,7 @@ import org.hibernate.service.spi.ServiceRegistryImplementor; import org.hibernate.testing.junit4.BaseUnitTestCase; +import org.hibernate.testing.util.ServiceRegistryUtil; import org.junit.Before; import org.junit.Test; 
@@ -36,7 +37,7 @@ public void setUp() { final BootstrapServiceRegistry bootReg = new BootstrapServiceRegistryBuilder().applyClassLoader( CommunityDialectFactoryTest.class.getClassLoader() ).build(); - registry = new StandardServiceRegistryBuilder( bootReg ).build(); + registry = ServiceRegistryUtil.serviceRegistryBuilder( bootReg ).build(); dialectFactory = new DialectFactoryImpl(); dialectFactory.injectServices( (ServiceRegistryImplementor) registry ); diff --git a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectSelectorTest.java b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectSelectorTest.java index be8343db653b..a88887460c84 100644 --- a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectSelectorTest.java +++ b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/CommunityDialectSelectorTest.java @@ -17,16 +17,53 @@ public class CommunityDialectSelectorTest { @Test public void verifyAllDialectNamingResolve() { + testDialectNamingResolution( DB297Dialect.class ); + testDialectNamingResolution( DB2390Dialect.class ); + testDialectNamingResolution( DB2390V8Dialect.class ); + testDialectNamingResolution( Cache71Dialect.class ); testDialectNamingResolution( CUBRIDDialect.class ); + + testDialectNamingResolution( DerbyTenFiveDialect.class ); + testDialectNamingResolution( DerbyTenSixDialect.class ); + testDialectNamingResolution( DerbyTenSevenDialect.class ); + testDialectNamingResolution( FirebirdDialect.class ); testDialectNamingResolution( InformixDialect.class ); testDialectNamingResolution( IngresDialect.class ); testDialectNamingResolution( Ingres9Dialect.class ); testDialectNamingResolution( Ingres10Dialect.class ); testDialectNamingResolution( MimerSQLDialect.class ); + + testDialectNamingResolution( MariaDB53Dialect.class ); + testDialectNamingResolution( MariaDB10Dialect.class ); + testDialectNamingResolution( 
MariaDB102Dialect.class ); + + testDialectNamingResolution( MySQL5Dialect.class ); + testDialectNamingResolution( MySQL55Dialect.class ); + + testDialectNamingResolution( Oracle8iDialect.class ); + testDialectNamingResolution( Oracle9iDialect.class ); + testDialectNamingResolution( Oracle10gDialect.class ); + + testDialectNamingResolution( PostgreSQL81Dialect.class ); + testDialectNamingResolution( PostgreSQL82Dialect.class ); + testDialectNamingResolution( PostgreSQL9Dialect.class ); + testDialectNamingResolution( PostgreSQL91Dialect.class ); + testDialectNamingResolution( PostgreSQL92Dialect.class ); + testDialectNamingResolution( PostgreSQL93Dialect.class ); + testDialectNamingResolution( PostgreSQL94Dialect.class ); + testDialectNamingResolution( PostgreSQL95Dialect.class ); + testDialectNamingResolution( SAPDBDialect.class ); + + testDialectNamingResolution( SQLServer2005Dialect.class ); + testDialectNamingResolution( SybaseAnywhereDialect.class ); + testDialectNamingResolution( Sybase11Dialect.class ); + testDialectNamingResolution( SybaseASE15Dialect.class ); + testDialectNamingResolution( SybaseASE157Dialect.class ); + testDialectNamingResolution( TeradataDialect.class ); testDialectNamingResolution( TimesTenDialect.class ); } diff --git a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/InformixDialectTestCase.java b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/InformixDialectTestCase.java deleted file mode 100644 index 0ef6f369a7cd..000000000000 --- a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/InformixDialectTestCase.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. - * See the lgpl.txt file in the root directory or . 
- */ -package org.hibernate.community.dialect; - -import org.hibernate.boot.registry.StandardServiceRegistry; -import org.hibernate.boot.registry.StandardServiceRegistryBuilder; -import org.hibernate.metamodel.model.domain.internal.JpaMetamodelImpl; -import org.hibernate.metamodel.model.domain.internal.MappingMetamodelImpl; -import org.hibernate.metamodel.spi.MappingMetamodelImplementor; -import org.hibernate.query.criteria.ValueHandlingMode; -import org.hibernate.query.internal.NamedObjectRepositoryImpl; -import org.hibernate.query.spi.QueryEngine; -import org.hibernate.query.sqm.function.SelfRenderingSqmFunction; -import org.hibernate.query.sqm.function.SqmFunctionDescriptor; -import org.hibernate.sql.ast.spi.SqlAppender; -import org.hibernate.sql.ast.spi.StringBuilderSqlAppender; -import org.hibernate.testing.boot.MetadataBuildingContextTestingImpl; -import org.hibernate.type.BasicType; -import org.hibernate.type.descriptor.java.JdbcDateJavaType; -import org.hibernate.type.descriptor.java.JdbcTimestampJavaType; -import org.hibernate.type.descriptor.jdbc.DateJdbcType; -import org.hibernate.type.descriptor.jdbc.TimestampJdbcType; -import org.hibernate.type.spi.TypeConfiguration; - -import org.hibernate.testing.TestForIssue; -import org.hibernate.testing.junit4.BaseUnitTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static org.hibernate.engine.query.internal.NativeQueryInterpreterStandardImpl.NATIVE_QUERY_INTERPRETER; -import static org.junit.Assert.assertEquals; - -/** - * Testing of patched support for Informix boolean type; see HHH-9894, HHH-10800 - * - * @author Greg Jones - */ -public class InformixDialectTestCase extends BaseUnitTestCase { - - private static final InformixDialect dialect = new InformixDialect(); - private static StandardServiceRegistry ssr; - private static QueryEngine queryEngine; - private 
static MappingMetamodelImplementor mappingMetamodel; - private static TypeConfiguration typeConfiguration; - - @BeforeClass - public static void init() { - ssr = new StandardServiceRegistryBuilder().build(); - typeConfiguration = new TypeConfiguration(); - typeConfiguration.scope( new MetadataBuildingContextTestingImpl( ssr ) ); - mappingMetamodel = new MappingMetamodelImpl( typeConfiguration, ssr ); - final JpaMetamodelImpl jpaMetamodel = new JpaMetamodelImpl( typeConfiguration, mappingMetamodel, ssr ); - queryEngine = new QueryEngine( - null, - null, - jpaMetamodel, - ValueHandlingMode.BIND, - dialect.getPreferredSqlTypeCodeForBoolean(), - false, - new NamedObjectRepositoryImpl( emptyMap(), emptyMap(), emptyMap(), emptyMap() ), - NATIVE_QUERY_INTERPRETER, - dialect, - ssr - ); - } - - @AfterClass - public static void tearDown() { - queryEngine.close(); - ssr.close(); - } - - @Test - @TestForIssue(jiraKey = "HHH-9894") - public void testToBooleanValueStringTrue() { - assertEquals( "'t'", dialect.toBooleanValueString( true ) ); - } - - @Test - @TestForIssue(jiraKey = "HHH-9894") - public void testToBooleanValueStringFalse() { - assertEquals( "'f'", dialect.toBooleanValueString( false ) ); - } - - @Test - @TestForIssue(jiraKey = "HHH-10800") - public void testCurrentTimestampFunction() { - SqmFunctionDescriptor functionDescriptor = queryEngine.getSqmFunctionRegistry() - .findFunctionDescriptor( "current_timestamp" ); - SelfRenderingSqmFunction sqmExpression = - functionDescriptor.generateSqmExpression( null, queryEngine, typeConfiguration ); - BasicType basicType = (BasicType) sqmExpression.getNodeType(); - assertEquals( JdbcTimestampJavaType.INSTANCE, basicType.getJavaTypeDescriptor() ); - assertEquals( TimestampJdbcType.INSTANCE, basicType.getJdbcType() ); - - SqlAppender appender = new StringBuilderSqlAppender(); - sqmExpression.getRenderingSupport().render( appender, emptyList(), null ); - assertEquals( "current", appender.toString() ); - } - - @Test - 
@TestForIssue(jiraKey = "HHH-10800") - public void testCurrentDateFunction() { - SqmFunctionDescriptor functionDescriptor = queryEngine.getSqmFunctionRegistry() - .findFunctionDescriptor( "current_date" ); - SelfRenderingSqmFunction sqmExpression = - functionDescriptor.generateSqmExpression( null, queryEngine, typeConfiguration ); - BasicType basicType = (BasicType) sqmExpression.getNodeType(); - assertEquals( JdbcDateJavaType.INSTANCE, basicType.getJavaTypeDescriptor() ); - assertEquals( DateJdbcType.INSTANCE, basicType.getJdbcType() ); - - SqlAppender appender = new StringBuilderSqlAppender(); - sqmExpression.getRenderingSupport().render( appender, emptyList(), null ); - assertEquals( "today", appender.toString() ); - } - -} diff --git a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/SQLServer2005DialectTestCase.java b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/SQLServer2005DialectTestCase.java index 4ed8e0c393be..637f0563b81a 100644 --- a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/SQLServer2005DialectTestCase.java +++ b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/SQLServer2005DialectTestCase.java @@ -11,7 +11,6 @@ import org.hibernate.LockMode; import org.hibernate.LockOptions; import org.hibernate.dialect.DatabaseVersion; -import org.hibernate.dialect.SQLServer2005Dialect; import org.hibernate.query.spi.Limit; import org.hibernate.testing.TestForIssue; diff --git a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/functional/cache/SQLFunctionsInterSystemsTest.java b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/functional/cache/SQLFunctionsInterSystemsTest.java index e122ea95eb8d..4388d7cb4bb0 100644 --- a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/functional/cache/SQLFunctionsInterSystemsTest.java +++ 
b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/functional/cache/SQLFunctionsInterSystemsTest.java @@ -455,10 +455,10 @@ public void testSQLFunctions() { q.setParameter( "count", new Integer( -1 ) ); assertTrue( q.list().size() == 1 ); - ScrollableResults sr = s.createQuery( "from Simple s" ).scroll(); - sr.next(); - sr.get(); - sr.close(); + try (ScrollableResults sr = s.createQuery( "from Simple s" ).scroll()) { + sr.next(); + sr.get(); + } s.delete( other ); s.delete( simple ); diff --git a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/unit/lockhint/SybaseASE15LockHintsTest.java b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/unit/lockhint/SybaseASE15LockHintsTest.java index 3cbc8d07d880..b7898993d78f 100644 --- a/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/unit/lockhint/SybaseASE15LockHintsTest.java +++ b/hibernate-community-dialects/src/test/java/org/hibernate/community/dialect/unit/lockhint/SybaseASE15LockHintsTest.java @@ -8,7 +8,6 @@ import org.hibernate.community.dialect.SybaseASELegacyDialect; import org.hibernate.dialect.Dialect; -import org.hibernate.dialect.SybaseASE15Dialect; import org.hibernate.orm.test.dialect.unit.lockhint.AbstractLockHintTest; /** diff --git a/hibernate-core/hibernate-core.gradle b/hibernate-core/hibernate-core.gradle index 415619121809..2d81078036a9 100644 --- a/hibernate-core/hibernate-core.gradle +++ b/hibernate-core/hibernate-core.gradle @@ -10,13 +10,16 @@ plugins { id 'org.hibernate.build.xjc-jakarta' } +repositories { + gradlePluginPortal() +} + description = 'Hibernate\'s core ORM functionality' apply from: rootProject.file( 'gradle/published-java-module.gradle' ) apply plugin: 'org.hibernate.orm.antlr' apply plugin: 'org.hibernate.matrix-test' - configurations { tests { description = 'Configuration for the produced test jar' @@ -45,6 +48,7 @@ dependencies { compileOnly jakartaLibs.jsonbApi compileOnly 
libs.jackson compileOnly libs.jacksonXml + compileOnly dbLibs.postgresql testImplementation project(':hibernate-testing') testImplementation project(':hibernate-ant') @@ -66,7 +70,10 @@ dependencies { testRuntimeOnly libs.byteBuddy testRuntimeOnly testLibs.weld testRuntimeOnly testLibs.wildFlyTxnClient + testRuntimeOnly jakartaLibs.jsonb testRuntimeOnly libs.jackson + testRuntimeOnly libs.jacksonXml + testRuntimeOnly libs.jacksonJsr310 testAnnotationProcessor project( ':hibernate-jpamodelgen' ) @@ -237,13 +244,16 @@ tasks.withType( Test.class ).each { test -> test.jvmArgs( ['--add-opens', 'java.base/java.security=ALL-UNNAMED'] ) test.jvmArgs( ['--add-opens', 'java.base/java.lang=ALL-UNNAMED'] ) + //Avoid Log4J2 classloader leaks: + test.jvmArgs( ['-Dlog4j2.disableJmx=true'] ) + test.beforeTest { descriptor -> //println "Starting test: " + descriptor } } // Tests with records -if ( gradle.ext.javaVersions.test.release.asInt() >= 17 && gradle.ext.javaToolchainEnabled ) { +if ( jdkVersions.test.release.asInt() >= 17 && jdkVersions.explicit ) { // Add a new source set, which contains tests that can run on JDK17+ sourceSets { @@ -253,7 +263,7 @@ if ( gradle.ext.javaVersions.test.release.asInt() >= 17 && gradle.ext.javaToolch } // Refer to the main test resources to avoid processing variables twice resources { - srcDir 'target/resources/test' + srcDirs tasks.processTestResources } } } @@ -261,7 +271,7 @@ if ( gradle.ext.javaVersions.test.release.asInt() >= 17 && gradle.ext.javaToolch // For the new source set, we need to configure the source and target version to 17 compileTestJava17Java { javaCompiler = javaToolchains.compilerFor { - languageVersion = gradle.ext.javaVersions.test.compiler + languageVersion = jdkVersions.test.compiler } sourceCompatibility = 17 targetCompatibility = 17 @@ -272,6 +282,7 @@ if ( gradle.ext.javaVersions.test.release.asInt() >= 17 && gradle.ext.javaToolch // The source set gets a custom configuration which extends the normal test 
implementation config configurations { testJava17Implementation.extendsFrom(testImplementation, testRuntimeOnly) + testJava17CompileOnly.extendsFrom(testCompileOnly) } // Add the output from src/main/java as dependency @@ -284,13 +295,13 @@ if ( gradle.ext.javaVersions.test.release.asInt() >= 17 && gradle.ext.javaToolch // We execute the Java 17 tests in a custom test task task testJava17(type: Test) { javaLauncher = javaToolchains.launcherFor { - languageVersion = gradle.ext.javaVersions.test.launcher + languageVersion = jdkVersions.test.launcher } useJUnitPlatform() testClassesDirs = sourceSets.testJava17.output.classesDirs classpath = sourceSets.testJava17.runtimeClasspath - if ( gradle.ext.javaVersions.test.launcher.asInt() >= 19 ) { + if ( jdkVersions.test.launcher.asInt() >= 19 ) { logger.warn( "The version of Java bytecode that will be tested is not supported by Bytebuddy by default. " + " Setting 'net.bytebuddy.experimental=true'." ) systemProperty 'net.bytebuddy.experimental', true @@ -302,9 +313,13 @@ if ( gradle.ext.javaVersions.test.release.asInt() >= 17 && gradle.ext.javaToolch check.dependsOn testJava17 } -javadoc { +tasks.named( "javadoc", Javadoc ) { configure(options) { - overview = 'src/main/javadoc/overview.html' - stylesheetFile = new File(projectDir, 'src/main/javadoc/stylesheet.css') + overview = rootProject.file( "shared/javadoc/overview.html" ) } -} \ No newline at end of file +} + +tasks.sourcesJar.dependsOn ':hibernate-core:generateGraphParser' +tasks.sourcesJar.dependsOn ':hibernate-core:generateHqlParser' +tasks.sourcesJar.dependsOn ':hibernate-core:generateSqlScriptParser' +tasks.sourcesJar.dependsOn ':hibernate-core:generateOrderingParser' diff --git a/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlLexer.g4 b/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlLexer.g4 index 96870f50d885..0c0203728f2a 100644 --- a/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlLexer.g4 +++ 
b/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlLexer.g4 @@ -181,6 +181,7 @@ ELSE : [eE] [lL] [sS] [eE]; EMPTY : [eE] [mM] [pP] [tT] [yY]; END : [eE] [nN] [dD]; ENTRY : [eE] [nN] [tT] [rR] [yY]; +EPOCH : [eE] [pP] [oO] [cC] [hH]; ERROR : [eE] [rR] [rR] [oO] [rR]; ESCAPE : [eE] [sS] [cC] [aA] [pP] [eE]; EVERY : [eE] [vV] [eE] [rR] [yY]; @@ -289,6 +290,7 @@ TO : [tT] [oO]; TRAILING : [tT] [rR] [aA] [iI] [lL] [iI] [nN] [gG]; TREAT : [tT] [rR] [eE] [aA] [tT]; TRIM : [tT] [rR] [iI] [mM]; +TRUNC : [tT] [rR] [uU] [nN] [cC]; TRUNCATE : [tT] [rR] [uU] [nN] [cC] [aA] [tT] [eE]; TYPE : [tT] [yY] [pP] [eE]; UNBOUNDED : [uU] [nN] [bB] [oO] [uU] [nN] [dD] [eE] [dD]; diff --git a/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlParser.g4 b/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlParser.g4 index 260cc129fef8..7346970e9621 100644 --- a/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlParser.g4 +++ b/hibernate-core/src/main/antlr/org/hibernate/grammars/hql/HqlParser.g4 @@ -1051,7 +1051,8 @@ jpaNonstandardFunctionName * The function name, followed by a parenthesized list of comma-separated expressions */ genericFunction - : genericFunctionName LEFT_PAREN (genericFunctionArguments | ASTERISK)? RIGHT_PAREN nthSideClause? nullsClause? withinGroupClause? filterClause? overClause? + : genericFunctionName LEFT_PAREN (genericFunctionArguments | ASTERISK)? RIGHT_PAREN + nthSideClause? nullsClause? withinGroupClause? filterClause? overClause? ; /** @@ -1145,7 +1146,8 @@ anyFunction * The 'listagg()' ordered set-aggregate function */ listaggFunction - : LISTAGG LEFT_PAREN DISTINCT? expressionOrPredicate COMMA expressionOrPredicate onOverflowClause? RIGHT_PAREN withinGroupClause? filterClause? overClause? + : LISTAGG LEFT_PAREN DISTINCT? expressionOrPredicate COMMA expressionOrPredicate onOverflowClause? RIGHT_PAREN + withinGroupClause? filterClause? overClause? 
; /** @@ -1211,9 +1213,9 @@ frameClause * The start of the window content */ frameStart - : UNBOUNDED PRECEDING + : CURRENT ROW + | UNBOUNDED PRECEDING | expression PRECEDING - | CURRENT ROW | expression FOLLOWING ; @@ -1221,10 +1223,10 @@ frameStart * The end of the window content */ frameEnd - : expression PRECEDING - | CURRENT ROW - | expression FOLLOWING + : CURRENT ROW | UNBOUNDED FOLLOWING + | expression PRECEDING + | expression FOLLOWING ; /** @@ -1245,6 +1247,7 @@ frameExclusion standardFunction : castFunction | extractFunction + | truncFunction | formatFunction | collateFunction | substringFunction @@ -1452,6 +1455,13 @@ extractFunction | datetimeField LEFT_PAREN expression RIGHT_PAREN ; +/** + * The 'trunc()' function for truncating both numeric and datetime values + */ +truncFunction + : (TRUNC | TRUNCATE) LEFT_PAREN expression (COMMA (datetimeField | expression))? RIGHT_PAREN + ; + /** * A field that may be extracted from a date, time, or datetime */ @@ -1473,6 +1483,7 @@ datetimeField | MINUTE | SECOND | NANOSECOND + | EPOCH ; dayField @@ -1577,6 +1588,7 @@ rollup | EMPTY | END | ENTRY + | EPOCH | ERROR | ESCAPE | EVERY @@ -1686,6 +1698,7 @@ rollup | TRAILING | TREAT | TRIM + | TRUNC | TRUNCATE | TYPE | UNBOUNDED diff --git a/hibernate-core/src/main/antlr/org/hibernate/grammars/importsql/SqlScriptParser.g4 b/hibernate-core/src/main/antlr/org/hibernate/grammars/importsql/SqlScriptParser.g4 index 1296eb057daa..a996dfa07eaf 100644 --- a/hibernate-core/src/main/antlr/org/hibernate/grammars/importsql/SqlScriptParser.g4 +++ b/hibernate-core/src/main/antlr/org/hibernate/grammars/importsql/SqlScriptParser.g4 @@ -15,7 +15,7 @@ package org.hibernate.grammars.importsql; } script - : (NEWLINE | SPACE | TAB)* ( commandBlock (NEWLINE | SPACE | TAB)* )+ EOF + : (NEWLINE | SPACE | TAB)* ( commandBlock (NEWLINE | SPACE | TAB)* )* EOF ; commandBlock diff --git a/hibernate-core/src/main/java/org/hibernate/Cache.java b/hibernate-core/src/main/java/org/hibernate/Cache.java 
index dbb39429c000..3e6b744496fc 100644 --- a/hibernate-core/src/main/java/org/hibernate/Cache.java +++ b/hibernate-core/src/main/java/org/hibernate/Cache.java @@ -49,7 +49,7 @@ *

* Very occasionally, it's necessary or advantageous to control the cache explicitly * via programmatic eviction, using, for example, {@link #evictEntityData(Class)} to - * evicts a whole cache region, or {@link #evictEntityData(Class, Object)}, to evict + * evict a whole cache region, or {@link #evictEntityData(Class, Object)}, to evict * a single item. *

* If multiple entities or roles are mapped to the same cache region, they share diff --git a/hibernate-core/src/main/java/org/hibernate/FlushMode.java b/hibernate-core/src/main/java/org/hibernate/FlushMode.java index 54f0a5f6934d..07e6583be210 100644 --- a/hibernate-core/src/main/java/org/hibernate/FlushMode.java +++ b/hibernate-core/src/main/java/org/hibernate/FlushMode.java @@ -34,20 +34,27 @@ public enum FlushMode { /** * The {@link Session} is flushed when {@link Transaction#commit()} - * is called. + * is called. It is never automatically flushed before query + * execution. + * + * @see FlushModeType#COMMIT */ COMMIT, /** - * The {@link Session} is sometimes flushed before query execution - * in order to ensure that queries never return stale state. This - * is the default flush mode. + * The {@link Session} is flushed when {@link Transaction#commit()} + * is called, and is sometimes flushed before query execution in + * order to ensure that queries never return stale state. This is + * the default flush mode. + * + * @see FlushModeType#AUTO */ AUTO, /** - * The {@link Session} is flushed before every query. This is almost - * always unnecessary and inefficient. + * The {@link Session} is flushed when {@link Transaction#commit()} + * is called and before every query. This is usually unnecessary and + * inefficient. */ ALWAYS; diff --git a/hibernate-core/src/main/java/org/hibernate/Hibernate.java b/hibernate-core/src/main/java/org/hibernate/Hibernate.java index 3cbbac50ff70..b945f794745c 100644 --- a/hibernate-core/src/main/java/org/hibernate/Hibernate.java +++ b/hibernate-core/src/main/java/org/hibernate/Hibernate.java @@ -233,57 +233,6 @@ public static T get(List list, int key) { : list.get(key); } - /** - * Remove the value associated with the given key by the given persistent - * map, without fetching the state of the map from the database. 
- * - * @param map a persistent map associated with an open session - * @param key a key belonging to the map - * @return the previous value associated by the map with the given key, or null if there was no value associated - * with the given key - * - * @since 6.2.0 - */ - public static V remove(Map map, K key) { - return ( map instanceof PersistentMap ) - ? ( (PersistentMap) map ).queuedRemove( key ) - : map.remove( key ); - } - - /** - * Remove the specified element of the given persistent set, - * without fetching the state of the set from the database. - * - * @param set a persistent set associated with an open session - * @param element an element belonging to the set - * @return true if this set contained the specified element - * - * @since 6.2.0 - */ - public static boolean remove(Set set, T element) { - return ( set instanceof PersistentSet ) - ? ( (PersistentSet) set ).queuedRemove( element ) - : set.remove( element ); - } - - /** - * Remove the specified element of the given persistent list, - * without fetching the state of the list from the database. - * - * @param list a persistent list associated with an open session - * @param element an element belonging to the list - * @return true if this list contained the specified element - * - * @since 6.2.0 - */ - public static boolean remove(List list, T element) { - return ( list instanceof PersistentList ) - ? ( (PersistentList) list ).queuedRemove( element ) - : ( list instanceof PersistentBag ) - ? ( (PersistentBag) list ).queuedRemove( element ) - : list.remove( element ); - } - /** * Get the true, underlying class of a proxied entity. This operation will * initialize a proxy by side effect. 
diff --git a/hibernate-core/src/main/java/org/hibernate/Incubating.java b/hibernate-core/src/main/java/org/hibernate/Incubating.java index 55fc7a87e2e1..0a17f43d3700 100644 --- a/hibernate-core/src/main/java/org/hibernate/Incubating.java +++ b/hibernate-core/src/main/java/org/hibernate/Incubating.java @@ -6,6 +6,7 @@ */ package org.hibernate; +import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.Target; @@ -30,5 +31,6 @@ */ @Target({PACKAGE, TYPE, ANNOTATION_TYPE, METHOD, FIELD, CONSTRUCTOR}) @Retention(RUNTIME) +@Documented public @interface Incubating { } diff --git a/hibernate-core/src/main/java/org/hibernate/Internal.java b/hibernate-core/src/main/java/org/hibernate/Internal.java index ec182ba9025c..7c3e7601b37d 100644 --- a/hibernate-core/src/main/java/org/hibernate/Internal.java +++ b/hibernate-core/src/main/java/org/hibernate/Internal.java @@ -6,6 +6,7 @@ */ package org.hibernate; +import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.Target; @@ -29,6 +30,7 @@ */ @Target({PACKAGE, TYPE, METHOD, FIELD, CONSTRUCTOR}) @Retention(RUNTIME) +@Documented public @interface Internal { } diff --git a/hibernate-core/src/main/java/org/hibernate/PropertySetterAccessException.java b/hibernate-core/src/main/java/org/hibernate/PropertySetterAccessException.java index 0f0783b2acd7..843516d14829 100644 --- a/hibernate-core/src/main/java/org/hibernate/PropertySetterAccessException.java +++ b/hibernate-core/src/main/java/org/hibernate/PropertySetterAccessException.java @@ -8,6 +8,8 @@ import java.util.Collection; +import org.hibernate.proxy.HibernateProxy; + /** * @author Steve Ebersole */ @@ -47,7 +49,7 @@ public PropertySetterAccessException( } public static String loggablePropertyValueString(Object value) { - if ( value instanceof Collection ) { + if ( value instanceof Collection || value instanceof HibernateProxy ) { return value.getClass().getSimpleName(); } return 
value.toString(); diff --git a/hibernate-core/src/main/java/org/hibernate/Remove.java b/hibernate-core/src/main/java/org/hibernate/Remove.java index 38b3ec2d24c3..64a2d206c79c 100644 --- a/hibernate-core/src/main/java/org/hibernate/Remove.java +++ b/hibernate-core/src/main/java/org/hibernate/Remove.java @@ -6,6 +6,7 @@ */ package org.hibernate; +import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.Target; @@ -33,5 +34,6 @@ */ @Target({METHOD, FIELD, TYPE, PACKAGE, CONSTRUCTOR, TYPE_PARAMETER, TYPE_USE}) @Retention(RUNTIME) +@Documented public @interface Remove { } diff --git a/hibernate-core/src/main/java/org/hibernate/TimeZoneStorageStrategy.java b/hibernate-core/src/main/java/org/hibernate/TimeZoneStorageStrategy.java index 2231352f1cf5..2ff4618237e2 100644 --- a/hibernate-core/src/main/java/org/hibernate/TimeZoneStorageStrategy.java +++ b/hibernate-core/src/main/java/org/hibernate/TimeZoneStorageStrategy.java @@ -28,7 +28,18 @@ public enum TimeZoneStorageStrategy { */ COLUMN, /** - * Doesn't store the time zone, but instead normalizes to the JDBC timezone. + * Does not store the time zone, and instead: + *

    + *
  • when persisting to the database, normalizes JDBC timestamps to the + * {@linkplain org.hibernate.cfg.AvailableSettings#JDBC_TIME_ZONE} + * or to the JVM default time zone otherwise. + *
  • when reading back from the database, sets the offset or zone + * of {@code OffsetDateTime}/{@code ZonedDateTime} properties + * to the JVM default time zone. + *
+ *

+ * Provided partly for backward compatibility with older + * versions of Hibernate. */ NORMALIZE, /** diff --git a/hibernate-core/src/main/java/org/hibernate/action/internal/AbstractEntityInsertAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/AbstractEntityInsertAction.java index bb22b8379dc9..e9865268dc08 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/AbstractEntityInsertAction.java +++ b/hibernate-core/src/main/java/org/hibernate/action/internal/AbstractEntityInsertAction.java @@ -18,8 +18,11 @@ import org.hibernate.engine.spi.PersistenceContext; import org.hibernate.engine.spi.Status; import org.hibernate.event.spi.EventSource; +import org.hibernate.metamodel.mapping.AttributeMapping; +import org.hibernate.metamodel.mapping.AttributeMappingsList; import org.hibernate.metamodel.mapping.NaturalIdMapping; import org.hibernate.metamodel.mapping.PluralAttributeMapping; +import org.hibernate.metamodel.mapping.internal.EmbeddedAttributeMapping; import org.hibernate.persister.collection.CollectionPersister; import org.hibernate.persister.entity.AbstractEntityPersister; import org.hibernate.persister.entity.EntityPersister; @@ -148,19 +151,65 @@ public final void makeEntityManaged() { protected void addCollectionsByKeyToPersistenceContext(PersistenceContext persistenceContext, Object[] objects) { for ( int i = 0; i < objects.length; i++ ) { - if ( objects[i] instanceof PersistentCollection ) { - final PersistentCollection persistentCollection = (PersistentCollection) objects[i]; - final CollectionPersister collectionPersister = ( (PluralAttributeMapping) getPersister().getAttributeMapping( i ) ).getCollectionDescriptor(); - final CollectionKey collectionKey = new CollectionKey( - collectionPersister, - ( (AbstractEntityPersister) getPersister() ).getCollectionKey( - collectionPersister, - getInstance(), - persistenceContext.getEntry( getInstance() ), - getSession() - ) + final AttributeMapping attributeMapping = 
getPersister().getAttributeMapping( i ); + if ( attributeMapping.isEmbeddedAttributeMapping() ) { + visitEmbeddedAttributeMapping( + attributeMapping.asEmbeddedAttributeMapping(), + objects[i], + persistenceContext ); - persistenceContext.addCollectionByKey( collectionKey, persistentCollection ); + } + else if ( attributeMapping.isPluralAttributeMapping() ) { + addCollectionKey( + attributeMapping.asPluralAttributeMapping(), + objects[i], + persistenceContext + ); + } + } + } + + private void visitEmbeddedAttributeMapping( + EmbeddedAttributeMapping attributeMapping, + Object object, + PersistenceContext persistenceContext) { + if ( object != null ) { + final AttributeMappingsList attributeMappings = attributeMapping.getEmbeddableTypeDescriptor().getAttributeMappings(); + for ( int i = 0; i < attributeMappings.size(); i++ ) { + final AttributeMapping attribute = attributeMappings.get( i ); + if ( attribute.isPluralAttributeMapping() ) { + addCollectionKey( + attribute.asPluralAttributeMapping(), + attribute.getValue( object ), + persistenceContext + ); + } + else if ( attribute.isEmbeddedAttributeMapping() ) { + visitEmbeddedAttributeMapping( + attribute.asEmbeddedAttributeMapping(), + attribute.getValue( object ), + persistenceContext + ); + } + } + } + } + + private void addCollectionKey( + PluralAttributeMapping pluralAttributeMapping, + Object o, + PersistenceContext persistenceContext) { + if ( o instanceof PersistentCollection ) { + final CollectionPersister collectionPersister = pluralAttributeMapping.getCollectionDescriptor(); + final Object key = ( (AbstractEntityPersister) getPersister() ).getCollectionKey( + collectionPersister, + getInstance(), + persistenceContext.getEntry( getInstance() ), + getSession() + ); + if ( key != null ) { + final CollectionKey collectionKey = new CollectionKey( collectionPersister, key ); + persistenceContext.addCollectionByKey( collectionKey, (PersistentCollection) o ); } } } diff --git 
a/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionAction.java index ed58820fa1c1..3c1ba30737ef 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionAction.java +++ b/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionAction.java @@ -10,11 +10,11 @@ import org.hibernate.action.spi.AfterTransactionCompletionProcess; import org.hibernate.action.spi.BeforeTransactionCompletionProcess; -import org.hibernate.action.spi.Executable; import org.hibernate.cache.CacheException; import org.hibernate.cache.spi.access.CollectionDataAccess; import org.hibernate.cache.spi.access.SoftLock; import org.hibernate.collection.spi.PersistentCollection; +import org.hibernate.engine.spi.ComparableExecutable; import org.hibernate.engine.spi.SharedSessionContractImplementor; import org.hibernate.event.spi.EventSource; import org.hibernate.internal.FastSessionServices; @@ -27,7 +27,7 @@ * * @author Gavin King */ -public abstract class CollectionAction implements Executable, Serializable, Comparable { +public abstract class CollectionAction implements ComparableExecutable { private transient CollectionPersister persister; private transient EventSource session; @@ -122,6 +122,16 @@ protected final Object getKey() { return finalKey; } + @Override + public String getPrimarySortClassifier() { + return collectionRole; + } + + @Override + public Object getSecondarySortIndex() { + return key; + } + protected final EventSource getSession() { return session; } @@ -145,15 +155,15 @@ public String toString() { } @Override - public int compareTo(CollectionAction action) { + public int compareTo(ComparableExecutable o) { // sort first by role name - final int roleComparison = collectionRole.compareTo( action.collectionRole ); + final int roleComparison = collectionRole.compareTo( o.getPrimarySortClassifier() ); if ( roleComparison != 0 ) { return 
roleComparison; } else { //then by fk - return persister.getAttributeMapping().getKeyDescriptor().compare( key, action.key ); + return persister.getAttributeMapping().getKeyDescriptor().compare( key, o.getSecondarySortIndex() ); // //noinspection unchecked // final JavaType javaType = (JavaType) persister.getAttributeMapping().getKeyDescriptor().getJavaType(); // return javaType.getComparator().compare( key, action.key ); diff --git a/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionRemoveAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionRemoveAction.java index e0df7cb3c350..ac7f498a4fa5 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionRemoveAction.java +++ b/hibernate-core/src/main/java/org/hibernate/action/internal/CollectionRemoveAction.java @@ -154,4 +154,7 @@ private PostCollectionRemoveEvent newPostCollectionRemoveEvent() { ); } + public Object getAffectedOwner() { + return affectedOwner; + } } diff --git a/hibernate-core/src/main/java/org/hibernate/action/internal/ComparableEntityAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/ComparableEntityAction.java deleted file mode 100644 index cab7fdcbecb6..000000000000 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/ComparableEntityAction.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Hibernate, Relational Persistence for Idiomatic Java - * - * License: GNU Lesser General Public License (LGPL), version 2.1 or later. - * See the lgpl.txt file in the root directory or . - */ -package org.hibernate.action.internal; - -/** - * With this interface we can compare entity actions in the queue - * even if the implementation doesn't extend {@link EntityAction}. 
- */ -public interface ComparableEntityAction extends Comparable { - String getEntityName(); - - Object getId(); -} diff --git a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityAction.java index 363ae0d06beb..203c0f31b38d 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityAction.java +++ b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityAction.java @@ -11,7 +11,7 @@ import org.hibernate.AssertionFailure; import org.hibernate.action.spi.AfterTransactionCompletionProcess; import org.hibernate.action.spi.BeforeTransactionCompletionProcess; -import org.hibernate.action.spi.Executable; +import org.hibernate.engine.spi.ComparableExecutable; import org.hibernate.engine.spi.EntityEntry; import org.hibernate.event.spi.EventSource; import org.hibernate.internal.FastSessionServices; @@ -26,7 +26,7 @@ * @author Gavin King */ public abstract class EntityAction - implements ComparableEntityAction, Executable, Serializable, AfterTransactionCompletionProcess { + implements ComparableExecutable, AfterTransactionCompletionProcess { private final String entityName; private final Object id; @@ -151,13 +151,23 @@ public String toString() { } @Override - public int compareTo(ComparableEntityAction action) { + public int compareTo(ComparableExecutable o) { //sort first by entity name - final int roleComparison = entityName.compareTo( action.getEntityName() ); + final int roleComparison = entityName.compareTo( o.getPrimarySortClassifier() ); return roleComparison != 0 ? 
roleComparison //then by id - : persister.getIdentifierType().compare( id, action.getId(), session.getSessionFactory() ); + : persister.getIdentifierType().compare( id, o.getSecondarySortIndex(), session.getSessionFactory() ); + } + + @Override + public String getPrimarySortClassifier() { + return entityName; + } + + @Override + public Object getSecondarySortIndex() { + return id; } /** diff --git a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityDeleteAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityDeleteAction.java index f83a61c42e0d..425ac42f9071 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityDeleteAction.java +++ b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityDeleteAction.java @@ -145,7 +145,7 @@ public void execute() throws HibernateException { } } - private Object getCurrentVersion() { + protected Object getCurrentVersion() { return getPersister().isVersionPropertyGenerated() // skip if we're deleting an unloaded proxy, no need for the version && isInstanceLoaded() @@ -156,7 +156,7 @@ && isInstanceLoaded() : version; } - private void postDeleteLoaded( + protected void postDeleteLoaded( Object id, EntityPersister persister, SharedSessionContractImplementor session, @@ -171,7 +171,7 @@ private void postDeleteLoaded( throw new AssertionFailure( "possible non-threadsafe access to session" ); } entry.postDelete(); - EntityKey key = entry.getEntityKey(); + final EntityKey key = entry.getEntityKey(); persistenceContext.removeEntity( key ); persistenceContext.removeProxy( key ); removeCacheItem( ck ); @@ -179,9 +179,9 @@ private void postDeleteLoaded( postDelete(); } - private void postDeleteUnloaded(Object id, EntityPersister persister, SharedSessionContractImplementor session, Object ck) { + protected void postDeleteUnloaded(Object id, EntityPersister persister, SharedSessionContractImplementor session, Object ck) { final PersistenceContext persistenceContext = 
session.getPersistenceContextInternal(); - EntityKey key = session.generateEntityKey( id, persister ); + final EntityKey key = session.generateEntityKey( id, persister ); if ( !persistenceContext.containsDeletedUnloadedEntityKey( key ) ) { throw new AssertionFailure( "deleted proxy should be for an unloaded entity: " + key ); } @@ -257,12 +257,17 @@ protected boolean hasPostCommitEventListeners() { return false; } - private Object lockCacheItem() { + protected Object lockCacheItem() { final EntityPersister persister = getPersister(); if ( persister.canWriteToCache() ) { final EntityDataAccess cache = persister.getCacheAccessStrategy(); final SharedSessionContractImplementor session = getSession(); - Object ck = cache.generateCacheKey( getId(), persister, session.getFactory(), session.getTenantIdentifier() ); + final Object ck = cache.generateCacheKey( + getId(), + persister, + session.getFactory(), + session.getTenantIdentifier() + ); lock = cache.lockItem( session, ck, getCurrentVersion() ); return ck; } @@ -271,7 +276,7 @@ private Object lockCacheItem() { } } - private void unlockCacheItem() { + protected void unlockCacheItem() { final EntityPersister persister = getPersister(); if ( persister.canWriteToCache() ) { final EntityDataAccess cache = persister.getCacheAccessStrategy(); @@ -286,7 +291,7 @@ private void unlockCacheItem() { } } - private void removeCacheItem(Object ck) { + protected void removeCacheItem(Object ck) { final EntityPersister persister = getPersister(); if ( persister.canWriteToCache() ) { persister.getCacheAccessStrategy().remove( getSession(), ck ); diff --git a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityInsertAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityInsertAction.java index 8cae0da98268..7c82ff7017f9 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityInsertAction.java +++ b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityInsertAction.java 
@@ -100,7 +100,7 @@ public void execute() throws HibernateException { final EntityPersister persister = getPersister(); final Object instance = getInstance(); persister.insert( id, getState(), instance, session ); - PersistenceContext persistenceContext = session.getPersistenceContextInternal(); + final PersistenceContext persistenceContext = session.getPersistenceContextInternal(); final EntityEntry entry = persistenceContext.getEntry( instance ); if ( entry == null ) { throw new AssertionFailure( "possible non-threadsafe access to session" ); @@ -138,7 +138,7 @@ else if ( persister.isVersionPropertyGenerated() ) { } } - private void putCacheIfNecessary() { + protected void putCacheIfNecessary() { final EntityPersister persister = getPersister(); final SharedSessionContractImplementor session = getSession(); if ( isCachePutEnabled( persister, session ) ) { diff --git a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityUpdateAction.java b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityUpdateAction.java index 08dbb70acf8a..e528b411f3b5 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/internal/EntityUpdateAction.java +++ b/hibernate-core/src/main/java/org/hibernate/action/internal/EntityUpdateAction.java @@ -7,6 +7,7 @@ package org.hibernate.action.internal; import org.hibernate.AssertionFailure; +import org.hibernate.CacheMode; import org.hibernate.HibernateException; import org.hibernate.cache.CacheException; import org.hibernate.cache.spi.access.EntityDataAccess; @@ -179,7 +180,7 @@ public void execute() throws HibernateException { } } - private void handleNaturalIdResolutions(EntityPersister persister, SharedSessionContractImplementor session, Object id) { + protected void handleNaturalIdResolutions(EntityPersister persister, SharedSessionContractImplementor session, Object id) { if ( naturalIdMapping != null ) { session.getPersistenceContextInternal().getNaturalIdResolutions().manageSharedResolution( id, @@ -191,11 
+192,11 @@ private void handleNaturalIdResolutions(EntityPersister persister, SharedSession } } - private void updateCacheItem(Object previousVersion, Object ck, EntityEntry entry) { + protected void updateCacheItem(Object previousVersion, Object ck, EntityEntry entry) { final EntityPersister persister = getPersister(); if ( persister.canWriteToCache() ) { final SharedSessionContractImplementor session = getSession(); - if ( persister.isCacheInvalidationRequired() || entry.getStatus() != Status.MANAGED ) { + if ( isCacheInvalidationRequired( persister, session ) || entry.getStatus() != Status.MANAGED ) { persister.getCacheAccessStrategy().remove( session, ck ); } else if ( session.getCacheMode().isPutEnabled() ) { @@ -215,6 +216,13 @@ else if ( session.getCacheMode().isPutEnabled() ) { } } + private static boolean isCacheInvalidationRequired( + EntityPersister persister, + SharedSessionContractImplementor session) { + // the cache has to be invalidated when CacheMode is equal to GET or IGNORE + return persister.isCacheInvalidationRequired() || session.getCacheMode() == CacheMode.GET || session.getCacheMode() == CacheMode.IGNORE; + } + private void handleGeneratedProperties(EntityEntry entry) { final EntityPersister persister = getPersister(); final Object instance = getInstance(); @@ -271,7 +279,7 @@ protected Object getPreviousVersion() { } } - private Object lockCacheItem(Object previousVersion) { + protected Object lockCacheItem(Object previousVersion) { final EntityPersister persister = getPersister(); if ( persister.canWriteToCache() ) { final SharedSessionContractImplementor session = getSession(); diff --git a/hibernate-core/src/main/java/org/hibernate/action/spi/Executable.java b/hibernate-core/src/main/java/org/hibernate/action/spi/Executable.java index 5795ad90452a..52ec4b40ca00 100644 --- a/hibernate-core/src/main/java/org/hibernate/action/spi/Executable.java +++ b/hibernate-core/src/main/java/org/hibernate/action/spi/Executable.java @@ -11,6 +11,8 @@ 
import org.hibernate.HibernateException; import org.hibernate.event.spi.EventSource; +import org.checkerframework.checker.nullness.qual.Nullable; + /** * An operation which may be scheduled for later execution. Usually, the * operation is a database insert/update/delete, together with required @@ -47,7 +49,7 @@ public interface Executable { * @return The after-transaction-completion process, or null if we have no * after-transaction-completion process */ - AfterTransactionCompletionProcess getAfterTransactionCompletionProcess(); + @Nullable AfterTransactionCompletionProcess getAfterTransactionCompletionProcess(); /** * Get the before-transaction-completion process, if any, for this action. @@ -55,7 +57,7 @@ public interface Executable { * @return The before-transaction-completion process, or null if we have no * before-transaction-completion process */ - BeforeTransactionCompletionProcess getBeforeTransactionCompletionProcess(); + @Nullable BeforeTransactionCompletionProcess getBeforeTransactionCompletionProcess(); /** * Reconnect to session after deserialization diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/Any.java b/hibernate-core/src/main/java/org/hibernate/annotations/Any.java index 20f0ff696738..242ad2da2cec 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/Any.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/Any.java @@ -42,6 +42,7 @@ * class Order { * ... * @Any + * @AnyKeyJavaClass(UUID.class) //the foreign key type * @JoinColumn(name="payment_id") //the foreign key column * @Column(name="payment_type") //the discriminator column * @AnyDiscriminatorValue(discriminator="CASH", entity=CashPayment.class) @@ -101,6 +102,10 @@ /** * Whether the association is optional. + *

+ * If the discriminator {@link jakarta.persistence.Column Column} or the + * {@link jakarta.persistence.JoinColumn JoinColumn} are not nullable the + * association is always considered non-optional, regardless of this value. * * @return {@code false} if the association cannot be null. */ diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/BatchSize.java b/hibernate-core/src/main/java/org/hibernate/annotations/BatchSize.java index 8aadcfe7267b..dea37e50c51e 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/BatchSize.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/BatchSize.java @@ -59,6 +59,8 @@ public @interface BatchSize { /** * The maximum batch size, a strictly positive integer. + *

+ * Default is defined by {@link org.hibernate.cfg.FetchSettings#DEFAULT_BATCH_FETCH_SIZE} */ int size(); } diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/Formula.java b/hibernate-core/src/main/java/org/hibernate/annotations/Formula.java index a2c2b4a3d9c0..df04bdacaaa7 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/Formula.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/Formula.java @@ -23,15 +23,15 @@ * A formula may involve multiple columns and SQL operators: *

  * // perform calculations using SQL operators
- * @Formula("sub_total + (sub_total * tax)")
- * long getTotalCost() { ... }
+ * @Formula("sub_total * (1.0 + tax)")
+ * BigDecimal totalWithTax;
  * 
*

* It may even call SQL functions: *

  * // call native SQL functions
  * @Formula("upper(substring(middle_name from 0 for 1))")
- * Character getMiddleInitial() { ... }
+ * Character middleInitial;
  * 
*

* For an entity with {@linkplain jakarta.persistence.SecondaryTable secondary tables}, diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollection.java b/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollection.java index 43499c29ef2f..3a3900c0976c 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollection.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollection.java @@ -31,9 +31,8 @@ * instead of {@code LazyCollection(FALSE)}. *

  • Use static methods of {@link org.hibernate.Hibernate}, * for example {@link org.hibernate.Hibernate#size(Collection)}, - * {@link org.hibernate.Hibernate#contains(Collection, Object)}, - * {@link org.hibernate.Hibernate#get(Map, Object)}, or - * {@link org.hibernate.Hibernate#remove(Map, Object)} instead + * {@link org.hibernate.Hibernate#contains(Collection, Object)}, or + * {@link org.hibernate.Hibernate#get(Map, Object)} instead * of {@code LazyCollection(EXTRA)}. * */ diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollectionOption.java b/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollectionOption.java index a310dfa0bf03..c4983bf5a2cd 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollectionOption.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/LazyCollectionOption.java @@ -25,9 +25,8 @@ * instead of {@code LazyCollection(FALSE)}. *
  • Use static methods of {@link org.hibernate.Hibernate}, * for example {@link org.hibernate.Hibernate#size(Collection)}, - * {@link org.hibernate.Hibernate#contains(Collection, Object)}, - * {@link org.hibernate.Hibernate#get(Map, Object)}, or - * {@link org.hibernate.Hibernate#remove(Map, Object)} instead + * {@link org.hibernate.Hibernate#contains(Collection, Object)}, or + * {@link org.hibernate.Hibernate#get(Map, Object)} instead * of {@code LazyCollection(EXTRA)}. * */ diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/Mutability.java b/hibernate-core/src/main/java/org/hibernate/annotations/Mutability.java index c18ed2812ab5..a763ef4faa51 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/Mutability.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/Mutability.java @@ -20,7 +20,20 @@ import static java.lang.annotation.RetentionPolicy.RUNTIME; /** - * Specifies a {@link MutabilityPlan} for a some sort of basic value mapping. + * Specifies a {@link MutabilityPlan} for a basic value mapping. + * + * Mutability refers to whether the internal state of a value can change. + * For example, {@linkplain java.util.Date Date} is considered mutable because its + * internal state can be changed using {@link java.util.Date#setTime} whereas + * {@linkplain java.lang.String String} is considered immutable because its internal + * state cannot be changed. Hibernate uses this distinction when it can for internal + * optimizations. + * + * Hibernate understands the inherent mutability of a large number of Java types - + * {@linkplain java.util.Date Date}, {@linkplain java.lang.String String}, etc. + * {@linkplain Mutability} and friends allow plugging in specific strategies. + * + * * *

    Mutability for basic-typed attributes

    *

    diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/Struct.java b/hibernate-core/src/main/java/org/hibernate/annotations/Struct.java index a7266436ccc2..b045ee365a7f 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/Struct.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/Struct.java @@ -47,4 +47,12 @@ * The name of the UDT (user defined type). */ String name(); + + /** + * The ordered set of attributes of the UDT, as they appear physically in the DDL. + * It is important to specify the attributes in the same order for JDBC interactions to work correctly. + * If the annotated type is a record, the order of record components is used as the default order. + * If no default order can be inferred, attributes are assumed to be in alphabetical order. + */ + String[] attributes() default {}; } diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/TimeZoneStorageType.java b/hibernate-core/src/main/java/org/hibernate/annotations/TimeZoneStorageType.java index f2083acc0fa0..f601ad4272e5 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/TimeZoneStorageType.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/TimeZoneStorageType.java @@ -78,15 +78,16 @@ public enum TimeZoneStorageType { /** * Does not store the time zone, and instead: *

      - *
    • normalizes JDBC timestamps to the - * {@linkplain org.hibernate.cfg.AvailableSettings#JDBC_TIME_ZONE - * JDBC timezone}, if set, or - *
    • passes them through in the JVM default time zone - * otherwise. + *
    • when persisting to the database, normalizes JDBC timestamps to the + * {@linkplain org.hibernate.cfg.AvailableSettings#JDBC_TIME_ZONE} + * or to the JVM default time zone if not set. + *
    • when reading back from the database, sets the offset or zone + * of {@code OffsetDateTime}/{@code ZonedDateTime} values + * to the JVM default time zone. *
    *

    * Provided partly for backward compatibility with older - * versions of Hibernate + * versions of Hibernate. */ NORMALIZE, /** diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/UuidGenerator.java b/hibernate-core/src/main/java/org/hibernate/annotations/UuidGenerator.java index 4d36705f25ea..258307739595 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/UuidGenerator.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/UuidGenerator.java @@ -30,11 +30,11 @@ enum Style { /** - * Defaults to {@link #RANDOM} + * Defaults to {@link #RANDOM}. */ AUTO, /** - * Uses {@link UUID#randomUUID()} to generate values + * Uses {@link UUID#randomUUID()} to generate values. */ RANDOM, /** @@ -48,7 +48,7 @@ enum Style { } /** - * Which style of generation should be used + * Specifies which {@linkplain Style style} of UUID generation should be used. */ Style style() default Style.AUTO; } diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/Where.java b/hibernate-core/src/main/java/org/hibernate/annotations/Where.java index d73ff2b2e8aa..b517e5b43687 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/Where.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/Where.java @@ -16,7 +16,7 @@ /** * Specifies a restriction written in native SQL to add to the generated - * SQL when querying an entity or collection. + * SQL for entities or collections. *

    * For example, {@code @Where} could be used to hide entity instances which * have been soft-deleted, either for the entity class itself: @@ -49,13 +49,9 @@ * *

    * By default, {@code @Where} restrictions declared for an entity are - * applied when loading a collection of that entity type. This behavior is - * controlled by: - *

      - *
    1. the annotation member {@link #applyInToManyFetch()}, and - *
    2. the configuration property - * {@value org.hibernate.cfg.AvailableSettings#USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS}. - *
    + * applied when loading associations of that entity type. This behavior can + * be disabled using the setting {@value org.hibernate.cfg.AvailableSettings#USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS}; + * note, however, that setting is disabled. *

    * Note that {@code @Where} restrictions are always applied and cannot be * disabled. Nor may they be parameterized. They're therefore much @@ -75,21 +71,4 @@ * A predicate, written in native SQL. */ String clause(); - - /** - * If this restriction applies to an entity type, should it also be - * applied when fetching a {@link jakarta.persistence.OneToMany} or - * {@link jakarta.persistence.ManyToOne} association that targets - * the entity type? - *

    - * By default, the restriction is not applied unless the property - * {@value org.hibernate.cfg.AvailableSettings#USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS} - * is explicitly disabled. - * - * @return {@code true} if the restriction should be applied even - * if the configuration property is not enabled - * - * @since 6.2 - */ - boolean applyInToManyFetch() default false; } diff --git a/hibernate-core/src/main/java/org/hibernate/annotations/package-info.java b/hibernate-core/src/main/java/org/hibernate/annotations/package-info.java index 45c306af7a4a..2fcf704d1fef 100644 --- a/hibernate-core/src/main/java/org/hibernate/annotations/package-info.java +++ b/hibernate-core/src/main/java/org/hibernate/annotations/package-info.java @@ -209,8 +209,8 @@ * The JPA-defined {@link jakarta.persistence.Cacheable} annotation is almost useless * to us, since: *

      - *
    • it provides no way to specify any information about the nature of the how - * cached entity and how its cache should be managed, and + *
    • it provides no way to specify any information about the nature of the cached + * entity and how its cache should be managed, and *
    • it may not be used to annotate associations. *
    *

    diff --git a/hibernate-core/src/main/java/org/hibernate/boot/BootLogging.java b/hibernate-core/src/main/java/org/hibernate/boot/BootLogging.java index 7f56a9cafdeb..102b6af69692 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/BootLogging.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/BootLogging.java @@ -21,7 +21,4 @@ public interface BootLogging { String NAME = SubSystemLogging.BASE + ".boot"; Logger BOOT_LOGGER = Logger.getLogger( NAME ); - - boolean DEBUG_ENABLED = BOOT_LOGGER.isDebugEnabled(); - boolean TRACE_ENABLED = BOOT_LOGGER.isTraceEnabled(); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/SessionFactoryBuilder.java b/hibernate-core/src/main/java/org/hibernate/boot/SessionFactoryBuilder.java index aae26df31fe0..2b30268d3cb1 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/SessionFactoryBuilder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/SessionFactoryBuilder.java @@ -10,6 +10,7 @@ import org.hibernate.CustomEntityDirtinessStrategy; import org.hibernate.EntityNameResolver; +import org.hibernate.Incubating; import org.hibernate.Interceptor; import org.hibernate.SessionFactory; import org.hibernate.SessionFactoryObserver; @@ -22,6 +23,7 @@ import org.hibernate.query.sqm.function.SqmFunctionDescriptor; import org.hibernate.resource.jdbc.spi.PhysicalConnectionHandlingMode; import org.hibernate.resource.jdbc.spi.StatementInspector; +import org.hibernate.type.format.FormatMapper; /** * The contract for building a {@link SessionFactory} given a number of options. @@ -696,6 +698,30 @@ public interface SessionFactoryBuilder { */ SessionFactoryBuilder enableJpaClosedCompliance(boolean enabled); + /** + * Specifies a {@link FormatMapper format mapper} to use for serialization/deserialization of JSON properties. + * + * @param jsonFormatMapper The {@link FormatMapper} to use. 
+ * + * @return {@code this}, for method chaining + * + * @see org.hibernate.cfg.AvailableSettings#JSON_FORMAT_MAPPER + */ + @Incubating + SessionFactoryBuilder applyJsonFormatMapper(FormatMapper jsonFormatMapper); + + /** + * Specifies a {@link FormatMapper format mapper} to use for serialization/deserialization of XML properties. + * + * @param xmlFormatMapper The {@link FormatMapper} to use. + * + * @return {@code this}, for method chaining + * + * @see org.hibernate.cfg.AvailableSettings#XML_FORMAT_MAPPER + */ + @Incubating + SessionFactoryBuilder applyXmlFormatMapper(FormatMapper xmlFormatMapper); + /** * After all options have been set, build the SessionFactory. * diff --git a/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/BeanValidationIntegrator.java b/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/BeanValidationIntegrator.java index 5e2e6366eacc..f2b20538806b 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/BeanValidationIntegrator.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/BeanValidationIntegrator.java @@ -36,7 +36,6 @@ public class BeanValidationIntegrator implements Integrator { public static final String APPLY_CONSTRAINTS = "hibernate.validator.apply_to_ddl"; - public static final String BV_CHECK_CLASS = "jakarta.validation.ConstraintViolation"; public static final String JAKARTA_BV_CHECK_CLASS = "jakarta.validation.ConstraintViolation"; public static final String MODE_PROPERTY = "javax.persistence.validation.mode"; @@ -165,12 +164,7 @@ private boolean isBeanValidationApiAvailable(ClassLoaderService classLoaderServi classLoaderService.classForName( JAKARTA_BV_CHECK_CLASS ); } catch (Exception e) { - try { - classLoaderService.classForName( BV_CHECK_CLASS ); - } - catch (Exception e2) { - return false; - } + return false; } return true; } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/HibernateTraversableResolver.java 
b/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/HibernateTraversableResolver.java index 1058a19823e5..c0afa9d0dc1d 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/HibernateTraversableResolver.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/HibernateTraversableResolver.java @@ -15,8 +15,11 @@ import org.hibernate.Hibernate; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.persister.entity.EntityPersister; +import org.hibernate.type.AnyType; import org.hibernate.type.CollectionType; +import org.hibernate.type.ComponentType; import org.hibernate.type.CompositeType; +import org.hibernate.type.EntityType; import org.hibernate.type.Type; import jakarta.validation.Path; @@ -54,17 +57,17 @@ private void addAssociationsToTheSetForAllProperties(String[] names, Type[] type private void addAssociationsToTheSetForOneProperty(String name, Type type, String prefix, SessionFactoryImplementor factory) { - if ( type.isCollectionType() ) { + if ( type instanceof CollectionType ) { CollectionType collType = (CollectionType) type; Type assocType = collType.getElementType( factory ); addAssociationsToTheSetForOneProperty(name, assocType, prefix, factory); } //ToOne association - else if ( type.isEntityType() || type.isAnyType() ) { + else if ( type instanceof EntityType || type instanceof AnyType ) { associations.add( prefix + name ); } - else if ( type.isComponentType() ) { - CompositeType componentType = (CompositeType) type; + else if ( type instanceof ComponentType ) { + ComponentType componentType = (ComponentType) type; addAssociationsToTheSetForAllProperties( componentType.getPropertyNames(), componentType.getSubtypes(), diff --git a/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/TypeSafeActivator.java b/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/TypeSafeActivator.java index 6b02747b7ef0..f1a5c40833e3 100644 --- 
a/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/TypeSafeActivator.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/beanvalidation/TypeSafeActivator.java @@ -483,11 +483,6 @@ private static ValidatorFactory getValidatorFactory(ActivationContext activation return Validation.buildDefaultValidatorFactory(); } catch ( Exception e ) { - LOG.infof( - e, - "Error calling `%s`", - "jakarta.validation.Validation#buildDefaultValidatorFactory" - ); throw new IntegrationException( "Unable to build the default ValidatorFactory", e ); } } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/internal/BootstrapContextImpl.java b/hibernate-core/src/main/java/org/hibernate/boot/internal/BootstrapContextImpl.java index 908c827dc864..c2670ad410ce 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/internal/BootstrapContextImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/internal/BootstrapContextImpl.java @@ -40,7 +40,7 @@ import org.hibernate.query.sqm.function.SqmFunctionDescriptor; import org.hibernate.query.sqm.function.SqmFunctionRegistry; import org.hibernate.resource.beans.spi.BeanInstanceProducer; -import org.hibernate.type.internal.BasicTypeImpl; +import org.hibernate.type.BasicType; import org.hibernate.type.spi.TypeConfiguration; import org.jboss.jandex.IndexView; @@ -220,17 +220,17 @@ public Collection getCacheRegionDefinitions() { return cacheRegionDefinitions == null ? 
Collections.emptyList() : cacheRegionDefinitions; } - private final Map> adHocBasicTypeRegistrations = new HashMap<>(); + private final Map> adHocBasicTypeRegistrations = new HashMap<>(); @Override - public void registerAdHocBasicType(BasicTypeImpl basicType) { + public void registerAdHocBasicType(BasicType basicType) { adHocBasicTypeRegistrations.put( basicType.getName(), basicType ); } @Override - public BasicTypeImpl resolveAdHocBasicType(String key) { + public BasicType resolveAdHocBasicType(String key) { //noinspection unchecked - return (BasicTypeImpl) adHocBasicTypeRegistrations.get( key ); + return (BasicType) adHocBasicTypeRegistrations.get( key ); } @Override diff --git a/hibernate-core/src/main/java/org/hibernate/boot/internal/InFlightMetadataCollectorImpl.java b/hibernate-core/src/main/java/org/hibernate/boot/internal/InFlightMetadataCollectorImpl.java index 4cb4cb58b0d0..e03f2f5dc8a8 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/internal/InFlightMetadataCollectorImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/internal/InFlightMetadataCollectorImpl.java @@ -48,6 +48,7 @@ import org.hibernate.boot.model.internal.FkSecondPass; import org.hibernate.boot.model.internal.IdGeneratorResolverSecondPass; import org.hibernate.boot.model.internal.JPAIndexHolder; +import org.hibernate.boot.model.internal.OptionalDeterminationSecondPass; import org.hibernate.boot.model.internal.QuerySecondPass; import org.hibernate.boot.model.internal.SecondaryTableFromAnnotationSecondPass; import org.hibernate.boot.model.internal.SecondaryTableSecondPass; @@ -141,6 +142,7 @@ public class InFlightMetadataCollectorImpl implements InFlightMetadataCollector, private final Map entityBindingMap = new HashMap<>(); private final List composites = new ArrayList<>(); + private final Map, Component> genericComponentsMap = new HashMap<>(); private final Map collectionBindingMap = new HashMap<>(); private final Map filterDefinitionMap = new HashMap<>(); @@ -282,6 
+284,16 @@ public void visitRegisteredComponents(Consumer consumer) { composites.forEach( consumer ); } + @Override + public void registerGenericComponent(Component component) { + genericComponentsMap.put( component.getComponentClass(), component ); + } + + @Override + public Component getGenericComponent(Class componentClass) { + return genericComponentsMap.get( componentClass ); + } + @Override public SessionFactoryBuilder getSessionFactoryBuilder() { throw new UnsupportedOperationException( @@ -1656,6 +1668,7 @@ public Join locateJoin(Identifier tableName) { private ArrayList implicitColumnNamingSecondPassList; private ArrayList generalSecondPassList; + private ArrayList optionalDeterminationSecondPassList; @Override public void addSecondPass(SecondPass secondPass) { @@ -1694,6 +1707,9 @@ else if ( secondPass instanceof QuerySecondPass ) { else if ( secondPass instanceof ImplicitColumnNamingSecondPass ) { addImplicitColumnNamingSecondPass( (ImplicitColumnNamingSecondPass) secondPass ); } + else if ( secondPass instanceof OptionalDeterminationSecondPass ) { + addOptionalDeterminationSecondPass( (OptionalDeterminationSecondPass) secondPass ); + } else { // add to the general SecondPass list if ( generalSecondPassList == null ) { @@ -1775,6 +1791,13 @@ private void addImplicitColumnNamingSecondPass(ImplicitColumnNamingSecondPass se implicitColumnNamingSecondPassList.add( secondPass ); } + private void addOptionalDeterminationSecondPass(OptionalDeterminationSecondPass secondPass) { + if ( optionalDeterminationSecondPassList == null ) { + optionalDeterminationSecondPassList = new ArrayList<>(); + } + optionalDeterminationSecondPassList.add( secondPass ); + } + private boolean inSecondPass = false; @@ -1801,6 +1824,7 @@ public void processSecondPasses(MetadataBuildingContext buildingContext) { processSecondPasses( querySecondPassList ); processSecondPasses( generalSecondPassList ); + processSecondPasses( optionalDeterminationSecondPassList ); 
processPropertyReferences(); @@ -2313,6 +2337,7 @@ public MetadataImpl buildMetadataInstance(MetadataBuildingContext buildingContex options, entityBindingMap, composites, + genericComponentsMap, mappedSuperClasses, collectionBindingMap, typeDefRegistry.copyRegistrationMap(), @@ -2341,6 +2366,12 @@ private void processExportableProducers() { final Dialect dialect = getDatabase().getJdbcEnvironment().getDialect(); for ( PersistentClass entityBinding : entityBindingMap.values() ) { + entityBinding.assignCheckConstraintsToTable( + dialect, + bootstrapContext.getTypeConfiguration(), + bootstrapContext.getFunctionRegistry() + ); + if ( entityBinding.isInherited() ) { continue; } @@ -2350,6 +2381,7 @@ private void processExportableProducers() { dialect, (RootClass) entityBinding ); + } for ( Collection collection : collectionBindingMap.values() ) { diff --git a/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataBuilderImpl.java b/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataBuilderImpl.java index ced669ef1de1..5c468ee04654 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataBuilderImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataBuilderImpl.java @@ -61,23 +61,28 @@ import org.hibernate.cache.spi.access.AccessType; import org.hibernate.cfg.AvailableSettings; import org.hibernate.cfg.MetadataSourceType; +import org.hibernate.dialect.Dialect; import org.hibernate.dialect.TimeZoneSupport; import org.hibernate.engine.config.spi.ConfigurationService; import org.hibernate.engine.config.spi.StandardConverters; -import org.hibernate.engine.jdbc.connections.spi.MultiTenantConnectionProvider; +import org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentImpl; import org.hibernate.engine.jdbc.spi.JdbcServices; import org.hibernate.id.factory.IdentifierGeneratorFactory; import org.hibernate.internal.CoreLogging; import org.hibernate.internal.CoreMessageLogger; import 
org.hibernate.internal.log.DeprecationLogger; +import org.hibernate.internal.util.NullnessHelper; import org.hibernate.internal.util.StringHelper; import org.hibernate.internal.util.collections.CollectionHelper; +import org.hibernate.jpa.spi.JpaCompliance; import org.hibernate.metamodel.CollectionClassification; import org.hibernate.query.sqm.function.SqmFunctionDescriptor; import org.hibernate.query.sqm.function.SqmFunctionRegistry; import org.hibernate.service.ServiceRegistry; import org.hibernate.service.spi.ServiceException; import org.hibernate.type.BasicType; +import org.hibernate.type.SqlTypes; +import org.hibernate.type.WrapperArrayHandling; import org.hibernate.type.spi.TypeConfiguration; import org.hibernate.usertype.UserType; @@ -87,6 +92,10 @@ import jakarta.persistence.ConstraintMode; import jakarta.persistence.SharedCacheMode; +import static org.hibernate.cfg.AvailableSettings.JPA_COMPLIANCE; +import static org.hibernate.cfg.AvailableSettings.WRAPPER_ARRAY_HANDLING; +import static org.hibernate.engine.config.spi.StandardConverters.BOOLEAN; + /** * @author Steve Ebersole */ @@ -472,7 +481,7 @@ public MappingDefaultsImpl(StandardServiceRegistry serviceRegistry) { this.implicitlyQuoteIdentifiers = configService.getSetting( AvailableSettings.GLOBALLY_QUOTED_IDENTIFIERS, - StandardConverters.BOOLEAN, + BOOLEAN, false ); @@ -582,6 +591,7 @@ public static class MetadataBuildingOptionsImpl private final MappingDefaultsImpl mappingDefaults; private final IdentifierGeneratorFactory identifierGeneratorFactory; private final TimeZoneStorageType defaultTimezoneStorage; + private final WrapperArrayHandling wrapperArrayHandling; // todo (6.0) : remove bootstrapContext property along with the deprecated methods private BootstrapContext bootstrapContext; @@ -619,29 +629,30 @@ public MetadataBuildingOptionsImpl(StandardServiceRegistry serviceRegistry) { this.mappingDefaults = new MappingDefaultsImpl( serviceRegistry ); this.defaultTimezoneStorage = 
resolveTimeZoneStorageStrategy( configService ); - this.multiTenancyEnabled = serviceRegistry.getService(MultiTenantConnectionProvider.class)!=null; + this.wrapperArrayHandling = resolveWrapperArrayHandling( configService, serviceRegistry ); + this.multiTenancyEnabled = JdbcEnvironmentImpl.isMultiTenancyEnabled( serviceRegistry ); this.xmlMappingEnabled = configService.getSetting( AvailableSettings.XML_MAPPING_ENABLED, - StandardConverters.BOOLEAN, + BOOLEAN, true ); this.implicitDiscriminatorsForJoinedInheritanceSupported = configService.getSetting( AvailableSettings.IMPLICIT_DISCRIMINATOR_COLUMNS_FOR_JOINED_SUBCLASS, - StandardConverters.BOOLEAN, + BOOLEAN, false ); this.explicitDiscriminatorsForJoinedInheritanceSupported = !configService.getSetting( AvailableSettings.IGNORE_EXPLICIT_DISCRIMINATOR_COLUMNS_FOR_JOINED_SUBCLASS, - StandardConverters.BOOLEAN, + BOOLEAN, false ); this.implicitlyForceDiscriminatorInSelect = configService.getSetting( AvailableSettings.FORCE_DISCRIMINATOR_IN_SELECTS_BY_DEFAULT, - StandardConverters.BOOLEAN, + BOOLEAN, false ); @@ -705,7 +716,7 @@ public MetadataBuildingOptionsImpl(StandardServiceRegistry serviceRegistry) { this.specjProprietarySyntaxEnabled = configService.getSetting( "hibernate.enable_specj_proprietary_syntax", - StandardConverters.BOOLEAN, + BOOLEAN, false ); @@ -755,7 +766,7 @@ public ColumnOrderingStrategy call() { this.useNationalizedCharacterData = configService.getSetting( AvailableSettings.USE_NATIONALIZED_CHARACTER_DATA, - StandardConverters.BOOLEAN, + BOOLEAN, false ); @@ -767,7 +778,7 @@ public ColumnOrderingStrategy call() { allowExtensionsInCdi = configService.getSetting( AvailableSettings.ALLOW_EXTENSIONS_IN_CDI, - StandardConverters.BOOLEAN, + BOOLEAN, false ); } @@ -868,6 +879,11 @@ private TimeZoneStorageStrategy toTimeZoneStorageStrategy(TimeZoneSupport timeZo } } + @Override + public WrapperArrayHandling getWrapperArrayHandling() { + return wrapperArrayHandling; + } + @Override public List 
getBasicTypeRegistrations() { return basicTypeRegistrations; @@ -959,7 +975,7 @@ public boolean isXmlMappingEnabled() { } @Override - public boolean disallowExtensionsInCdi() { + public boolean isAllowExtensionsInCdi() { return allowExtensionsInCdi; } @@ -999,4 +1015,40 @@ private static TimeZoneStorageType resolveTimeZoneStorageStrategy( TimeZoneStorageType.DEFAULT ); } + + private static WrapperArrayHandling resolveWrapperArrayHandling( + ConfigurationService configService, + StandardServiceRegistry serviceRegistry) { + final WrapperArrayHandling setting = NullnessHelper.coalesceSuppliedValues( + () -> configService.getSetting( + WRAPPER_ARRAY_HANDLING, + WrapperArrayHandling::interpretExternalSettingLeniently + ), + () -> resolveFallbackWrapperArrayHandling( configService, serviceRegistry ) + ); + + if ( setting == WrapperArrayHandling.PICK ) { + final Dialect dialect = serviceRegistry.getService( JdbcServices.class ).getDialect(); + if ( dialect.supportsStandardArrays() + && ( dialect.getPreferredSqlTypeCodeForArray() == SqlTypes.ARRAY + || dialect.getPreferredSqlTypeCodeForArray() == SqlTypes.SQLXML ) ) { + return WrapperArrayHandling.ALLOW; + } + + return WrapperArrayHandling.LEGACY; + } + + return setting; + }; + + private static WrapperArrayHandling resolveFallbackWrapperArrayHandling( + ConfigurationService configService, + StandardServiceRegistry serviceRegistry) { + if ( configService.getSetting( JPA_COMPLIANCE, BOOLEAN ) == Boolean.TRUE ) { + // JPA compliance was enabled. 
Use PICK + return WrapperArrayHandling.PICK; + } + + return WrapperArrayHandling.DISALLOW; + } } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataImpl.java b/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataImpl.java index 932074fb625f..019d73f3884e 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/internal/MetadataImpl.java @@ -90,6 +90,7 @@ public class MetadataImpl implements MetadataImplementor, Serializable { private final Map entityBindingMap; private final List composites; + private final Map, Component> genericComponentsMap; private final Map, MappedSuperclass> mappedSuperclassMap; private final Map collectionBindingMap; private final Map typeDefinitionMap; @@ -110,6 +111,7 @@ public MetadataImpl( MetadataBuildingOptions metadataBuildingOptions, Map entityBindingMap, List composites, + Map, Component> genericComponentsMap, Map, MappedSuperclass> mappedSuperclassMap, Map collectionBindingMap, Map typeDefinitionMap, @@ -129,6 +131,7 @@ public MetadataImpl( this.metadataBuildingOptions = metadataBuildingOptions; this.entityBindingMap = entityBindingMap; this.composites = composites; + this.genericComponentsMap = genericComponentsMap; this.mappedSuperclassMap = mappedSuperclassMap; this.collectionBindingMap = collectionBindingMap; this.typeDefinitionMap = typeDefinitionMap; @@ -414,18 +417,6 @@ public void orderColumns(boolean forceOrdering) { primaryKey.reorderColumns( primaryKeyColumns ); } } - for ( UniqueKey uniqueKey : table.getUniqueKeys().values() ) { - if ( uniqueKey.getColumns().size() > 1 ) { - final List uniqueKeyColumns = columnOrderingStrategy.orderConstraintColumns( - uniqueKey, - this - ); - if ( uniqueKeyColumns != null ) { - uniqueKey.getColumns().clear(); - uniqueKey.getColumns().addAll( uniqueKeyColumns ); - } - } - } for ( ForeignKey foreignKey : table.getForeignKeys().values() ) { final List columns = 
foreignKey.getColumns(); if ( columns.size() > 1 ) { @@ -570,6 +561,11 @@ public void visitRegisteredComponents(Consumer consumer) { composites.forEach( consumer ); } + @Override + public Component getGenericComponent(Class componentClass) { + return genericComponentsMap.get( componentClass ); + } + @Override public org.hibernate.type.Type getIdentifierType(String entityName) throws MappingException { final PersistentClass pc = entityBindingMap.get( entityName ); @@ -661,4 +657,8 @@ public java.util.List getComposites() { return composites; } + public Map, Component> getGenericComponentsMap() { + return genericComponentsMap; + } + } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/internal/NamedProcedureCallDefinitionImpl.java b/hibernate-core/src/main/java/org/hibernate/boot/internal/NamedProcedureCallDefinitionImpl.java index 76129a629dad..ea6ff3b03a46 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/internal/NamedProcedureCallDefinitionImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/internal/NamedProcedureCallDefinitionImpl.java @@ -23,7 +23,7 @@ import org.hibernate.procedure.internal.Util; import org.hibernate.procedure.spi.NamedCallableQueryMemento; import org.hibernate.procedure.spi.ParameterStrategy; -import org.hibernate.query.results.ResultSetMappingImpl; +import org.hibernate.query.results.ResultSetMapping; import jakarta.persistence.NamedStoredProcedureQuery; import jakarta.persistence.ParameterMode; @@ -86,7 +86,7 @@ public NamedCallableQueryMemento resolve(SessionFactoryImplementor sessionFactor final boolean specifiesResultClasses = resultClasses != null && resultClasses.length > 0; final boolean specifiesResultSetMappings = resultSetMappings != null && resultSetMappings.length > 0; - ResultSetMappingImpl resultSetMapping = new ResultSetMappingImpl( registeredName ); + final ResultSetMapping resultSetMapping = buildResultSetMapping( registeredName, sessionFactory ); if ( specifiesResultClasses ) { 
Util.resolveResultSetMappingClasses( @@ -125,6 +125,13 @@ else if ( specifiesResultSetMappings ) { ); } + private ResultSetMapping buildResultSetMapping(String registeredName, SessionFactoryImplementor sessionFactory) { + return sessionFactory + .getFastSessionServices() + .getJdbcValuesMappingProducerProvider() + .buildResultSetMapping( registeredName, false, sessionFactory ); + } + static class ParameterDefinitions { private final ParameterStrategy parameterStrategy; private final ParameterDefinition[] parameterDefinitions; diff --git a/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryBuilderImpl.java b/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryBuilderImpl.java index e29ece41fa78..9318852dc72a 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryBuilderImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryBuilderImpl.java @@ -31,6 +31,7 @@ import org.hibernate.query.sqm.function.SqmFunctionDescriptor; import org.hibernate.resource.jdbc.spi.PhysicalConnectionHandlingMode; import org.hibernate.resource.jdbc.spi.StatementInspector; +import org.hibernate.type.format.FormatMapper; /** * @author Gail Badner @@ -416,6 +417,18 @@ public SessionFactoryBuilder enableJpaClosedCompliance(boolean enabled) { return this; } + @Override + public SessionFactoryBuilder applyJsonFormatMapper(FormatMapper jsonFormatMapper) { + this.optionsBuilder.applyJsonFormatMapper( jsonFormatMapper ); + return this; + } + + @Override + public SessionFactoryBuilder applyXmlFormatMapper(FormatMapper xmlFormatMapper) { + this.optionsBuilder.applyXmlFormatMapper( xmlFormatMapper ); + return this; + } + @Override public void disableRefreshDetachedEntity() { this.optionsBuilder.disableRefreshDetachedEntity(); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryOptionsBuilder.java 
b/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryOptionsBuilder.java index 36f49123b81d..2e90a5ccb29f 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryOptionsBuilder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/internal/SessionFactoryOptionsBuilder.java @@ -40,7 +40,7 @@ import org.hibernate.context.spi.CurrentTenantIdentifierResolver; import org.hibernate.dialect.Dialect; import org.hibernate.engine.config.spi.ConfigurationService; -import org.hibernate.engine.jdbc.connections.spi.MultiTenantConnectionProvider; +import org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentImpl; import org.hibernate.engine.jdbc.env.spi.ExtractedDatabaseMetaData; import org.hibernate.engine.jdbc.spi.JdbcServices; import org.hibernate.id.uuid.LocalObjectUuidHelper; @@ -154,8 +154,8 @@ public class SessionFactoryOptionsBuilder implements SessionFactoryOptions { // integration private Object beanManagerReference; private Object validatorFactoryReference; - private final FormatMapper jsonFormatMapper; - private final FormatMapper xmlFormatMapper; + private FormatMapper jsonFormatMapper; + private FormatMapper xmlFormatMapper; // SessionFactory behavior private final boolean jpaBootstrap; @@ -352,7 +352,7 @@ public SessionFactoryOptionsBuilder(StandardServiceRegistry serviceRegistry, Boo this.checkNullability = configurationService.getSetting( CHECK_NULLABILITY, BOOLEAN, true ); this.initializeLazyStateOutsideTransactions = configurationService.getSetting( ENABLE_LAZY_LOAD_NO_TRANS, BOOLEAN, false ); - this.multiTenancyEnabled = serviceRegistry.getService(MultiTenantConnectionProvider.class)!=null; + this.multiTenancyEnabled = JdbcEnvironmentImpl.isMultiTenancyEnabled( serviceRegistry ); this.currentTenantIdentifierResolver = strategySelector.resolveStrategy( CurrentTenantIdentifierResolver.class, configurationSettings.get( MULTI_TENANT_IDENTIFIER_RESOLVER ) @@ -510,18 +510,16 @@ public 
SessionFactoryOptionsBuilder(StandardServiceRegistry serviceRegistry, Boo this.commentsEnabled = getBoolean( USE_SQL_COMMENTS, configurationSettings ); - this.preferUserTransaction = getBoolean( PREFER_USER_TRANSACTION, configurationSettings, false ); + this.preferUserTransaction = getBoolean( PREFER_USER_TRANSACTION, configurationSettings ); this.allowOutOfTransactionUpdateOperations = getBoolean( ALLOW_UPDATE_OUTSIDE_TRANSACTION, - configurationSettings, - false + configurationSettings ); this.releaseResourcesOnCloseEnabled = getBoolean( DISCARD_PC_ON_CLOSE, - configurationSettings, - false + configurationSettings ); Object jdbcTimeZoneValue = configurationSettings.get( @@ -555,8 +553,7 @@ else if ( jdbcTimeZoneValue != null ) { this.failOnPaginationOverCollectionFetchEnabled = getBoolean( FAIL_ON_PAGINATION_OVER_COLLECTION_FETCH, - configurationSettings, - false + configurationSettings ); this.immutableEntityUpdateQueryHandlingMode = ImmutableEntityUpdateQueryHandlingMode.interpret( @@ -568,8 +565,7 @@ else if ( jdbcTimeZoneValue != null ) { this.inClauseParameterPaddingEnabled = getBoolean( IN_CLAUSE_PARAMETER_PADDING, - configurationSettings, - false + configurationSettings ); this.queryStatisticsMaxSize = getInt( @@ -1237,6 +1233,14 @@ public void applyValidatorFactory(Object validatorFactory) { this.validatorFactoryReference = validatorFactory; } + public void applyJsonFormatMapper(FormatMapper jsonFormatMapper) { + this.jsonFormatMapper = jsonFormatMapper; + } + + public void applyXmlFormatMapper(FormatMapper xmlFormatMapper) { + this.xmlFormatMapper = xmlFormatMapper; + } + public void applySessionFactoryName(String sessionFactoryName) { this.sessionFactoryName = sessionFactoryName; } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/jaxb/JaxbLogger.java b/hibernate-core/src/main/java/org/hibernate/boot/jaxb/JaxbLogger.java index 666d7fe00a94..4e3bd28d1291 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/jaxb/JaxbLogger.java +++ 
b/hibernate-core/src/main/java/org/hibernate/boot/jaxb/JaxbLogger.java @@ -25,9 +25,5 @@ ) public interface JaxbLogger extends BasicLogger { String LOGGER_NAME = BootLogging.NAME + ".jaxb"; - JaxbLogger JAXB_LOGGER = Logger.getMessageLogger( JaxbLogger.class, LOGGER_NAME ); - - boolean TRACE_ENABLED = JAXB_LOGGER.isTraceEnabled(); - boolean DEBUG_ENABLED = JAXB_LOGGER.isDebugEnabled(); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/jaxb/internal/stax/LocalXmlResourceResolver.java b/hibernate-core/src/main/java/org/hibernate/boot/jaxb/internal/stax/LocalXmlResourceResolver.java index 2e5ea0ec3aa5..7f46731dfa48 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/jaxb/internal/stax/LocalXmlResourceResolver.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/jaxb/internal/stax/LocalXmlResourceResolver.java @@ -203,14 +203,14 @@ public String getIdentifierBase() { public boolean matches(String publicId, String systemId) { if ( publicId != null ) { if ( publicId.startsWith( httpBase ) - || publicId.matches( httpsBase ) ) { + || publicId.startsWith( httpsBase ) ) { return true; } } if ( systemId != null ) { if ( systemId.startsWith( httpBase ) - || systemId.matches( httpsBase ) ) { + || systemId.startsWith( httpsBase ) ) { return true; } } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/FunctionContributor.java b/hibernate-core/src/main/java/org/hibernate/boot/model/FunctionContributor.java index 02a7b2923321..11ec12af0547 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/FunctionContributor.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/FunctionContributor.java @@ -14,6 +14,8 @@ package org.hibernate.boot.model; +import org.hibernate.service.JavaServiceLoadable; + /** * On object that contributes custom HQL functions, eventually to a * {@link org.hibernate.query.sqm.function.SqmFunctionRegistry}, via an @@ -33,6 +35,7 @@ * * @author Karel Maesen */ +@JavaServiceLoadable public interface 
FunctionContributor { /** diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/TypeContributor.java b/hibernate-core/src/main/java/org/hibernate/boot/model/TypeContributor.java index 6d2ee6688228..8f2176812d9e 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/TypeContributor.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/TypeContributor.java @@ -6,6 +6,7 @@ */ package org.hibernate.boot.model; +import org.hibernate.service.JavaServiceLoadable; import org.hibernate.service.ServiceRegistry; /** @@ -31,6 +32,7 @@ * * @see org.hibernate.type.spi.TypeConfiguration */ +@JavaServiceLoadable public interface TypeContributor { /** * Contribute types diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/TypeDefinition.java b/hibernate-core/src/main/java/org/hibernate/boot/model/TypeDefinition.java index f4b9d526849c..7e43d7b1c322 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/TypeDefinition.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/TypeDefinition.java @@ -29,6 +29,7 @@ import org.hibernate.resource.beans.spi.ManagedBeanRegistry; import org.hibernate.type.BasicType; import org.hibernate.type.CustomType; +import org.hibernate.type.JavaObjectType; import org.hibernate.type.SerializableType; import org.hibernate.type.Type; import org.hibernate.type.descriptor.converter.spi.BasicValueConverter; @@ -221,57 +222,13 @@ public MutabilityPlan getMutabilityPlan() { // Series of backward compatible special cases if ( Serializable.class.isAssignableFrom( typeImplementorClass ) ) { - final JavaType jtd = typeConfiguration - .getJavaTypeRegistry() - .resolveDescriptor( typeImplementorClass ); - final JdbcType jdbcType = typeConfiguration.getJdbcTypeRegistry().getDescriptor( Types.VARBINARY ); - final BasicType resolved = InferredBasicValueResolver.resolveSqlTypeIndicators( - indicators, - typeConfiguration.getBasicTypeRegistry().resolve( jtd, jdbcType ), - jtd - ); 
@SuppressWarnings({"rawtypes", "unchecked"}) final SerializableType legacyType = new SerializableType( typeImplementorClass ); + return createBasicTypeResolution( legacyType, typeImplementorClass, indicators, typeConfiguration ); + } - return new BasicValue.Resolution<>() { - @Override - public JdbcMapping getJdbcMapping() { - return resolved; - } - - @Override @SuppressWarnings({"rawtypes", "unchecked"}) - public BasicType getLegacyResolvedBasicType() { - return legacyType; - } - - @Override @SuppressWarnings({"rawtypes", "unchecked"}) - public JavaType getDomainJavaType() { - return resolved.getMappedJavaType(); - } - - @Override - public JavaType getRelationalJavaType() { - return resolved.getMappedJavaType(); - } - - @Override - public JdbcType getJdbcType() { - return resolved.getJdbcType(); - } - - @Override - public BasicValueConverter getValueConverter() { - return resolved.getValueConverter(); - } - - @Override @SuppressWarnings({"rawtypes", "unchecked"}) - public MutabilityPlan getMutabilityPlan() { - // a TypeDefinition does not explicitly provide a MutabilityPlan (yet?) - return resolved.isMutable() - ? 
getDomainJavaType().getMutabilityPlan() - : ImmutableMutabilityPlan.instance(); - } - }; + if ( typeImplementorClass.isInterface() ) { + return createBasicTypeResolution( new JavaObjectType(), typeImplementorClass, indicators, typeConfiguration ); } throw new IllegalArgumentException( @@ -279,13 +236,73 @@ public MutabilityPlan getMutabilityPlan() { ); } + private static BasicValue.Resolution createBasicTypeResolution( + BasicType type, + Class typeImplementorClass, + JdbcTypeIndicators indicators, + TypeConfiguration typeConfiguration + ) { + final JavaType jtd = typeConfiguration + .getJavaTypeRegistry() + .resolveDescriptor( typeImplementorClass ); + final JdbcType jdbcType = typeConfiguration.getJdbcTypeRegistry().getDescriptor( Types.VARBINARY ); + final BasicType resolved = InferredBasicValueResolver.resolveSqlTypeIndicators( + indicators, + typeConfiguration.getBasicTypeRegistry().resolve( jtd, jdbcType ), + jtd + ); + + return new BasicValue.Resolution<>() { + @Override + public JdbcMapping getJdbcMapping() { + return resolved; + } + + @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) + public BasicType getLegacyResolvedBasicType() { + return type; + } + + @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) + public JavaType getDomainJavaType() { + return resolved.getMappedJavaType(); + } + + @Override + public JavaType getRelationalJavaType() { + return resolved.getMappedJavaType(); + } + + @Override + public JdbcType getJdbcType() { + return resolved.getJdbcType(); + } + + @Override + public BasicValueConverter getValueConverter() { + return resolved.getValueConverter(); + } + + @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) + public MutabilityPlan getMutabilityPlan() { + // a TypeDefinition does not explicitly provide a MutabilityPlan (yet?) + return resolved.isMutable() + ? 
getDomainJavaType().getMutabilityPlan() + : ImmutableMutabilityPlan.instance(); + } + }; + } + private static Object instantiateType( StandardServiceRegistry serviceRegistry, MetadataBuildingOptions buildingOptions, String name, Class typeImplementorClass, BeanInstanceProducer instanceProducer) { - if ( buildingOptions.disallowExtensionsInCdi() ) { + if ( !buildingOptions.isAllowExtensionsInCdi() ) { return name != null ? instanceProducer.produceBeanInstance( name, typeImplementorClass ) : instanceProducer.produceBeanInstance( typeImplementorClass ); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AbstractPropertyHolder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AbstractPropertyHolder.java index 36824fa76aed..1b1875ae3453 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AbstractPropertyHolder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AbstractPropertyHolder.java @@ -30,6 +30,7 @@ import org.hibernate.internal.CoreLogging; import org.hibernate.internal.util.StringHelper; import org.hibernate.usertype.internal.AbstractTimeZoneStorageCompositeUserType; +import org.hibernate.usertype.internal.OffsetTimeCompositeUserType; import org.jboss.logging.Logger; @@ -45,6 +46,7 @@ import jakarta.persistence.JoinTable; import jakarta.persistence.MappedSuperclass; +import static org.hibernate.boot.model.internal.TimeZoneStorageHelper.isOffsetTimeClass; import static org.hibernate.boot.model.internal.TimeZoneStorageHelper.useColumnForTimeZoneStorage; /** @@ -484,11 +486,19 @@ else if ( multipleOverrides != null ) { } } else if ( useColumnForTimeZoneStorage( element, context ) ) { - final Column column = createTimestampColumn( element, path, context ); - columnOverride.put( - path + "." 
+ AbstractTimeZoneStorageCompositeUserType.INSTANT_NAME, - new Column[]{ column } - ); + final Column column = createTemporalColumn( element, path, context ); + if ( isOffsetTimeClass( element ) ) { + columnOverride.put( + path + "." + OffsetTimeCompositeUserType.LOCAL_TIME_NAME, + new Column[] { column } + ); + } + else { + columnOverride.put( + path + "." + AbstractTimeZoneStorageCompositeUserType.INSTANT_NAME, + new Column[] { column } + ); + } final Column offsetColumn = createTimeZoneColumn( element, column ); columnOverride.put( path + "." + AbstractTimeZoneStorageCompositeUserType.ZONE_OFFSET_NAME, @@ -527,7 +537,7 @@ private static Column createTimeZoneColumn(XAnnotatedElement element, Column col } } - private static Column createTimestampColumn(XAnnotatedElement element, String path, MetadataBuildingContext context) { + private static Column createTemporalColumn(XAnnotatedElement element, String path, MetadataBuildingContext context) { int precision; final Column annotatedColumn = element.getAnnotation( Column.class ); if ( annotatedColumn != null ) { diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentBinder.java index 5b1284ef25c9..486cbfafcaa4 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentBinder.java @@ -55,6 +55,7 @@ public static void processAggregate( () -> new EmbeddableAggregateJavaType<>( component.getComponentClass(), structName ) ); component.setStructName( structName ); + component.setStructColumnNames( determineStructAttributeNames( inferredData, returnedClassOrElement ) ); // Determine the aggregate column BasicValueBinder basicValueBinder = new BasicValueBinder( BasicValueBinder.Kind.ATTRIBUTE, component, context ); @@ -135,6 +136,21 @@ private static String 
determineStructName( return null; } + private static String[] determineStructAttributeNames(PropertyData inferredData, XClass returnedClassOrElement) { + final XProperty property = inferredData.getProperty(); + if ( property != null ) { + final Struct struct = property.getAnnotation( Struct.class ); + if ( struct != null ) { + return struct.attributes(); + } + } + final Struct struct = returnedClassOrElement.getAnnotation( Struct.class ); + if ( struct != null ) { + return struct.attributes(); + } + return null; + } + private static boolean isAggregate(XProperty property, XClass returnedClass) { if ( property != null ) { final Struct struct = property.getAnnotation( Struct.class ); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentSecondPass.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentSecondPass.java index 12ff5820b87b..35132097d387 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentSecondPass.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AggregateComponentSecondPass.java @@ -31,6 +31,7 @@ import org.hibernate.mapping.Component; import org.hibernate.mapping.PersistentClass; import org.hibernate.mapping.Property; +import org.hibernate.mapping.Selectable; import org.hibernate.mapping.UserDefinedType; import org.hibernate.mapping.Value; import org.hibernate.metamodel.internal.EmbeddableHelper; @@ -189,41 +190,57 @@ public void doSecondPass(Map persistentClasses) throws private void orderColumns(UserDefinedType userDefinedType) { final Class componentClass = component.getComponentClass(); final int[] originalOrder = component.sortProperties(); - final int[] propertyMappingIndex; - if ( ReflectHelper.isRecord( componentClass ) ) { - if ( originalOrder == null ) { - propertyMappingIndex = null; + final String[] structColumnNames = component.getStructColumnNames(); + if ( structColumnNames == null || 
structColumnNames.length == 0 ) { + final int[] propertyMappingIndex; + if ( ReflectHelper.isRecord( componentClass ) ) { + if ( originalOrder == null ) { + propertyMappingIndex = null; + } + else { + final String[] componentNames = ReflectHelper.getRecordComponentNames( componentClass ); + propertyMappingIndex = EmbeddableHelper.determineMappingIndex( + component.getPropertyNames(), + componentNames + ); + } } - else { - final String[] componentNames = ReflectHelper.getRecordComponentNames( componentClass ); - propertyMappingIndex = EmbeddableHelper.determinePropertyMappingIndex( + else if ( component.getInstantiatorPropertyNames() != null ) { + propertyMappingIndex = EmbeddableHelper.determineMappingIndex( component.getPropertyNames(), - componentNames + component.getInstantiatorPropertyNames() ); } - } - else { - if ( component.getInstantiatorPropertyNames() == null ) { - return; + else { + propertyMappingIndex = null; } - propertyMappingIndex = EmbeddableHelper.determinePropertyMappingIndex( - component.getPropertyNames(), - component.getInstantiatorPropertyNames() - ); - } - final ArrayList orderedColumns = new ArrayList<>( userDefinedType.getColumnSpan() ); - final List properties = component.getProperties(); - if ( propertyMappingIndex == null ) { - for ( Property property : properties ) { - addColumns( orderedColumns, property.getValue() ); + if ( propertyMappingIndex == null ) { + // If there is default ordering possible, assume alphabetical ordering + final ArrayList orderedColumns = new ArrayList<>( userDefinedType.getColumnSpan() ); + final List properties = component.getProperties(); + for ( Property property : properties ) { + addColumns( orderedColumns, property.getValue() ); + } + userDefinedType.reorderColumns( orderedColumns ); + } + else { + final ArrayList orderedColumns = new ArrayList<>( userDefinedType.getColumnSpan() ); + final List properties = component.getProperties(); + for ( final int propertyIndex : propertyMappingIndex ) { + 
addColumns( orderedColumns, properties.get( propertyIndex ).getValue() ); + } + userDefinedType.reorderColumns( orderedColumns ); } } else { - for ( final int propertyIndex : propertyMappingIndex ) { - addColumns( orderedColumns, properties.get( propertyIndex ).getValue() ); + final ArrayList orderedColumns = new ArrayList<>( userDefinedType.getColumnSpan() ); + for ( String structColumnName : structColumnNames ) { + if ( !addColumns( orderedColumns, component, structColumnName ) ) { + throw new MappingException( "Couldn't find column [" + structColumnName + "] that was defined in @Struct(attributes) in the component [" + component.getComponentClassName() + "]" ); + } } + userDefinedType.reorderColumns( orderedColumns ); } - userDefinedType.reorderColumns( orderedColumns ); } private static void addColumns(ArrayList orderedColumns, Value value) { @@ -243,6 +260,33 @@ private static void addColumns(ArrayList orderedColumns, Value value) { } } + private static boolean addColumns(ArrayList orderedColumns, Component component, String structColumnName) { + for ( Property property : component.getProperties() ) { + final Value value = property.getValue(); + if ( value instanceof Component ) { + final Component subComponent = (Component) value; + if ( subComponent.getAggregateColumn() == null ) { + if ( addColumns( orderedColumns, subComponent, structColumnName ) ) { + return true; + } + } + else if ( structColumnName.equals( subComponent.getAggregateColumn().getName() ) ) { + orderedColumns.add( subComponent.getAggregateColumn() ); + return true; + } + } + else { + for ( Selectable selectable : value.getSelectables() ) { + if ( selectable instanceof Column && structColumnName.equals( ( (Column) selectable ).getName() ) ) { + orderedColumns.add( (Column) selectable ); + return true; + } + } + } + } + return false; + } + private void validateSupportedColumnTypes(String basePath, Component component) { for ( Property property : component.getProperties() ) { final Value value 
= property.getValue(); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedColumn.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedColumn.java index 02beed2fff13..097fe7c36d03 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedColumn.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedColumn.java @@ -881,8 +881,7 @@ private static AnnotatedColumns buildImplicitColumn( // } //not following the spec but more clean if ( nullability != Nullability.FORCED_NULL - && inferredData.getClassOrElement().isPrimitive() - && !inferredData.getProperty().isArray() ) { + && !PropertyBinder.isOptional( inferredData.getProperty(), propertyHolder ) ) { column.setNullable( false ); } final String propertyName = inferredData.getPropertyName(); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedDiscriminatorColumn.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedDiscriminatorColumn.java index dc81b873bde6..2725f4273ed1 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedDiscriminatorColumn.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotatedDiscriminatorColumn.java @@ -94,6 +94,7 @@ private static void setDiscriminatorType( case CHAR: column.setDiscriminatorTypeName( "character" ); column.setImplicit( false ); + column.setLength( 1L ); break; case INTEGER: column.setDiscriminatorTypeName( "integer" ); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotationBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotationBinder.java index 1ea1c6e3a071..3a7ad75cca2e 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotationBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnnotationBinder.java @@ -494,15 +494,9 @@ private static 
void handleJdbcTypeRegistration( ManagedBeanRegistry managedBeanRegistry, JdbcTypeRegistration annotation) { final Class jdbcTypeClass = annotation.value(); - - final JdbcType jdbcType; - if ( context.getBuildingOptions().disallowExtensionsInCdi() ) { - jdbcType = FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( jdbcTypeClass ); - } - else { - jdbcType = managedBeanRegistry.getBean( jdbcTypeClass ).getBeanInstance(); - } - + final JdbcType jdbcType = !context.getBuildingOptions().isAllowExtensionsInCdi() + ? FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( jdbcTypeClass ) + : managedBeanRegistry.getBean( jdbcTypeClass ).getBeanInstance(); final int typeCode = annotation.registrationCode() == Integer.MIN_VALUE ? jdbcType.getDefaultSqlTypeCode() : annotation.registrationCode(); @@ -514,14 +508,10 @@ private static void handleJavaTypeRegistration( ManagedBeanRegistry managedBeanRegistry, JavaTypeRegistration annotation) { final Class> javaTypeClass = annotation.descriptorClass(); - - final BasicJavaType javaType; - if ( context.getBuildingOptions().disallowExtensionsInCdi() ) { - javaType = FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( javaTypeClass ); - } - else { - javaType = managedBeanRegistry.getBean( javaTypeClass ).getBeanInstance(); - } + final BasicJavaType javaType = + !context.getBuildingOptions().isAllowExtensionsInCdi() + ? 
FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( javaTypeClass ) + : managedBeanRegistry.getBean( javaTypeClass ).getBeanInstance(); context.getMetadataCollector().addJavaTypeRegistration( annotation.javaType(), javaType ); } @@ -751,16 +741,11 @@ public Dialect getDialect() { } private static JdbcMapping resolveUserType(Class> userTypeClass, MetadataBuildingContext context) { - final UserType userType; - if ( context.getBuildingOptions().disallowExtensionsInCdi() ) { - userType = FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( userTypeClass ); - } - else { - final StandardServiceRegistry serviceRegistry = context.getBootstrapContext().getServiceRegistry(); - final ManagedBeanRegistry beanRegistry = serviceRegistry.getService( ManagedBeanRegistry.class ); - userType = beanRegistry.getBean( userTypeClass ).getBeanInstance(); - } - + final UserType userType = !context.getBuildingOptions().isAllowExtensionsInCdi() + ? FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( userTypeClass ) + : context.getBootstrapContext().getServiceRegistry() + .requireService( ManagedBeanRegistry.class ) + .getBean( userTypeClass ).getBeanInstance(); return new CustomType<>( userType, context.getBootstrapContext().getTypeConfiguration() ); } @@ -812,7 +797,7 @@ private static JavaType getJavaType( return registeredJtd; } - if ( context.getBuildingOptions().disallowExtensionsInCdi() ) { + if ( !context.getBuildingOptions().isAllowExtensionsInCdi() ) { return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( javaTypeClass ); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnyBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnyBinder.java index 6ea2964e2704..b8742a3bd48a 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnyBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/AnyBinder.java @@ -94,6 +94,7 @@ private static void bindAny( } final 
boolean lazy = any.fetch() == FetchType.LAZY; + final boolean optional = any.optional(); final Any value = BinderHelper.buildAnyValue( property.getAnnotation( Column.class ), getOverridableAnnotation( property, Formula.class, context ), @@ -104,7 +105,7 @@ private static void bindAny( nullability, propertyHolder, entityBinder, - any.optional(), + optional, context ); @@ -119,7 +120,9 @@ private static void bindAny( } binder.setAccessType( inferredData.getDefaultAccess() ); binder.setCascade( cascadeStrategy ); + binder.setBuildingContext( context ); Property prop = binder.makeProperty(); + prop.setOptional( optional && value.isNullable() ); //composite FK columns are in the same table, so it's OK propertyHolder.addProperty( prop, columns, inferredData.getDeclaringClass() ); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BasicValueBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BasicValueBinder.java index 1d2328f82029..0981d2c00095 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BasicValueBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BasicValueBinder.java @@ -68,6 +68,7 @@ import org.hibernate.type.BasicType; import org.hibernate.type.SerializableToBlobType; import org.hibernate.type.descriptor.java.BasicJavaType; +import org.hibernate.type.descriptor.java.Immutability; import org.hibernate.type.descriptor.java.ImmutableMutabilityPlan; import org.hibernate.type.descriptor.java.JavaType; import org.hibernate.type.descriptor.java.MutabilityPlan; @@ -151,6 +152,7 @@ public enum Kind { private TemporalType temporalPrecision; private TimeZoneStorageType timeZoneStorageType; private boolean partitionKey; + private Integer jdbcTypeCode; private Table table; private AnnotatedColumns columns; @@ -430,7 +432,7 @@ private void prepareCollectionId(XProperty modelXProperty) { throw new MappingException( "idbag mapping missing @CollectionId" ); } - final 
boolean useDeferredBeanContainerAccess = buildingContext.getBuildingOptions().disallowExtensionsInCdi(); + final boolean useDeferredBeanContainerAccess = !buildingContext.getBuildingOptions().isAllowExtensionsInCdi(); final ManagedBeanRegistry beanRegistry = getManagedBeanRegistry(); explicitBasicTypeName = null; @@ -479,20 +481,21 @@ private void prepareCollectionId(XProperty modelXProperty) { explicitMutabilityAccess = (typeConfiguration) -> { final CollectionIdMutability mutabilityAnn = findAnnotation( modelXProperty, CollectionIdMutability.class ); if ( mutabilityAnn != null ) { - final Class> mutabilityClass = normalizeMutability( mutabilityAnn.value() ); + final Class> mutabilityClass = mutabilityAnn.value(); if ( mutabilityClass != null ) { - if ( useDeferredBeanContainerAccess ) { - return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( mutabilityClass ); - } - final ManagedBean> jtdBean = beanRegistry.getBean( mutabilityClass ); - return jtdBean.getBeanInstance(); + return resolveMutability( mutabilityClass ); } } - // see if the value's type Class is annotated `@Immutable` + // see if the value's type Class is annotated with mutability-related annotations if ( implicitJavaTypeAccess != null ) { final Class attributeType = ReflectHelper.getClass( implicitJavaTypeAccess.apply( typeConfiguration ) ); if ( attributeType != null ) { + final Mutability attributeTypeMutabilityAnn = attributeType.getAnnotation( Mutability.class ); + if ( attributeTypeMutabilityAnn != null ) { + return resolveMutability( attributeTypeMutabilityAnn.value() ); + } + if ( attributeType.isAnnotationPresent( Immutable.class ) ) { return ImmutableMutabilityPlan.instance(); } @@ -503,11 +506,7 @@ private void prepareCollectionId(XProperty modelXProperty) { if ( converterDescriptor != null ) { final Mutability converterMutabilityAnn = converterDescriptor.getAttributeConverterClass().getAnnotation( Mutability.class ); if ( converterMutabilityAnn != null ) { - if ( 
useDeferredBeanContainerAccess ) { - return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( converterMutabilityAnn.value() ); - } - final ManagedBean> jtdBean = beanRegistry.getBean( converterMutabilityAnn.value() ); - return jtdBean.getBeanInstance(); + return resolveMutability( converterMutabilityAnn.value() ); } if ( converterDescriptor.getAttributeConverterClass().isAnnotationPresent( Immutable.class ) ) { @@ -515,17 +514,22 @@ private void prepareCollectionId(XProperty modelXProperty) { } } + // if there is a UserType, see if its Class is annotated with mutability-related annotations final Class> customTypeImpl = Kind.ATTRIBUTE.mappingAccess.customType( modelXProperty ); - if ( customTypeImpl.isAnnotationPresent( Immutable.class ) ) { - return ImmutableMutabilityPlan.instance(); + if ( customTypeImpl != null ) { + final Mutability customTypeMutabilityAnn = customTypeImpl.getAnnotation( Mutability.class ); + if ( customTypeMutabilityAnn != null ) { + return resolveMutability( customTypeMutabilityAnn.value() ); + } + + if ( customTypeImpl.isAnnotationPresent( Immutable.class ) ) { + return ImmutableMutabilityPlan.instance(); + } } // generally, this will trigger usage of the `JavaType#getMutabilityPlan` return null; }; - - // todo (6.0) - handle generator -// final String generator = collectionIdAnn.generator(); } private ManagedBeanRegistry getManagedBeanRegistry() { @@ -551,7 +555,7 @@ private void prepareMapKey( temporalPrecision = mapKeyTemporalAnn.value(); } - final boolean useDeferredBeanContainerAccess = buildingContext.getBuildingOptions().disallowExtensionsInCdi(); + final boolean useDeferredBeanContainerAccess = !buildingContext.getBuildingOptions().isAllowExtensionsInCdi(); explicitJdbcTypeAccess = typeConfiguration -> { final MapKeyJdbcType jdbcTypeAnn = findAnnotation( mapAttribute, MapKeyJdbcType.class ); @@ -600,35 +604,32 @@ private void prepareMapKey( explicitMutabilityAccess = typeConfiguration -> { final MapKeyMutability 
mutabilityAnn = findAnnotation( mapAttribute, MapKeyMutability.class ); if ( mutabilityAnn != null ) { - final Class> mutabilityClass = normalizeMutability( mutabilityAnn.value() ); + final Class> mutabilityClass = mutabilityAnn.value(); if ( mutabilityClass != null ) { - if ( useDeferredBeanContainerAccess ) { - return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( mutabilityClass ); - } - final ManagedBean> jtdBean = getManagedBeanRegistry().getBean( mutabilityClass ); - return jtdBean.getBeanInstance(); + return resolveMutability( mutabilityClass ); } } - // see if the value's type Class is annotated `@Immutable` + // see if the value's Java Class is annotated with mutability-related annotations if ( implicitJavaTypeAccess != null ) { final Class attributeType = ReflectHelper.getClass( implicitJavaTypeAccess.apply( typeConfiguration ) ); if ( attributeType != null ) { + final Mutability attributeTypeMutabilityAnn = attributeType.getAnnotation( Mutability.class ); + if ( attributeTypeMutabilityAnn != null ) { + return resolveMutability( attributeTypeMutabilityAnn.value() ); + } + if ( attributeType.isAnnotationPresent( Immutable.class ) ) { return ImmutableMutabilityPlan.instance(); } } } - // if the value is converted, see if the converter Class is annotated `@Immutable` + // if the value is converted, see if converter Class is annotated with mutability-related annotations if ( converterDescriptor != null ) { final Mutability converterMutabilityAnn = converterDescriptor.getAttributeConverterClass().getAnnotation( Mutability.class ); if ( converterMutabilityAnn != null ) { - if ( useDeferredBeanContainerAccess ) { - return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( converterMutabilityAnn.value() ); - } - final ManagedBean> jtdBean = getManagedBeanRegistry().getBean( converterMutabilityAnn.value() ); - return jtdBean.getBeanInstance(); + return resolveMutability( converterMutabilityAnn.value() ); } if ( 
converterDescriptor.getAttributeConverterClass().isAnnotationPresent( Immutable.class ) ) { @@ -636,8 +637,14 @@ private void prepareMapKey( } } + // if there is a UserType, see if its Class is annotated with mutability-related annotations final Class> customTypeImpl = Kind.MAP_KEY.mappingAccess.customType( mapAttribute ); if ( customTypeImpl != null ) { + final Mutability customTypeMutabilityAnn = customTypeImpl.getAnnotation( Mutability.class ); + if ( customTypeMutabilityAnn != null ) { + return resolveMutability( customTypeMutabilityAnn.value() ); + } + if ( customTypeImpl.isAnnotationPresent( Immutable.class ) ) { return ImmutableMutabilityPlan.instance(); } @@ -651,7 +658,7 @@ private void prepareMapKey( private void prepareListIndex(XProperty listAttribute) { implicitJavaTypeAccess = typeConfiguration -> Integer.class; - final boolean useDeferredBeanContainerAccess = buildingContext.getBuildingOptions().disallowExtensionsInCdi(); + final boolean useDeferredBeanContainerAccess = !buildingContext.getBuildingOptions().isAllowExtensionsInCdi(); final ManagedBeanRegistry beanRegistry = buildingContext .getBootstrapContext() .getServiceRegistry() @@ -866,7 +873,7 @@ private void prepareAnyDiscriminator(XProperty modelXProperty) { private void prepareAnyKey(XProperty modelXProperty) { implicitJavaTypeAccess = (typeConfiguration) -> null; - final boolean useDeferredBeanContainerAccess = buildingContext.getBuildingOptions().disallowExtensionsInCdi(); + final boolean useDeferredBeanContainerAccess = !buildingContext.getBuildingOptions().isAllowExtensionsInCdi(); explicitJavaTypeAccess = (typeConfiguration) -> { final AnyKeyJavaType javaTypeAnn = findAnnotation( modelXProperty, AnyKeyJavaType.class ); @@ -923,7 +930,7 @@ private void normalJdbcTypeDetails(XProperty attributeXProperty) { if ( jdbcTypeAnn != null ) { final Class jdbcTypeClass = normalizeJdbcType( jdbcTypeAnn.value() ); if ( jdbcTypeClass != null ) { - if ( 
buildingContext.getBuildingOptions().disallowExtensionsInCdi() ) { + if ( !buildingContext.getBuildingOptions().isAllowExtensionsInCdi() ) { return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( jdbcTypeClass ); } return getManagedBeanRegistry().getBean( jdbcTypeClass ).getBeanInstance(); @@ -943,53 +950,80 @@ private void normalJdbcTypeDetails(XProperty attributeXProperty) { } private void normalMutabilityDetails(XProperty attributeXProperty) { - explicitMutabilityAccess = typeConfiguration -> { + // Look for `@Mutability` on the attribute final Mutability mutabilityAnn = findAnnotation( attributeXProperty, Mutability.class ); if ( mutabilityAnn != null ) { - final Class> mutability = normalizeMutability( mutabilityAnn.value() ); + final Class> mutability = mutabilityAnn.value(); if ( mutability != null ) { - if ( buildingContext.getBuildingOptions().disallowExtensionsInCdi() ) { - return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( mutability ); - } - return getManagedBeanRegistry().getBean( mutability ).getBeanInstance(); + return resolveMutability( mutability ); } } + // Look for `@Immutable` on the attribute final Immutable immutableAnn = attributeXProperty.getAnnotation( Immutable.class ); if ( immutableAnn != null ) { return ImmutableMutabilityPlan.instance(); } - // see if the value's type Class is annotated `@Immutable` - if ( implicitJavaTypeAccess != null ) { - final Class attributeType = ReflectHelper.getClass( implicitJavaTypeAccess.apply( typeConfiguration ) ); + // Look for `@Mutability` on the attribute's type + if ( explicitJavaTypeAccess != null || implicitJavaTypeAccess != null ) { + Class attributeType = null; + if ( explicitJavaTypeAccess != null ) { + final BasicJavaType jtd = explicitJavaTypeAccess.apply( typeConfiguration ); + if ( jtd != null ) { + attributeType = jtd.getJavaTypeClass(); + } + } + if ( attributeType == null ) { + final java.lang.reflect.Type javaType = implicitJavaTypeAccess.apply( 
typeConfiguration ); + if ( javaType != null ) { + attributeType = ReflectHelper.getClass( javaType ); + } + } + if ( attributeType != null ) { - if ( attributeType.isAnnotationPresent( Immutable.class ) ) { + final Mutability classMutability = attributeType.getAnnotation( Mutability.class ); + + if ( classMutability != null ) { + final Class> mutability = classMutability.value(); + if ( mutability != null ) { + return resolveMutability( mutability ); + } + } + + final Immutable classImmutable = attributeType.getAnnotation( Immutable.class ); + if ( classImmutable != null ) { return ImmutableMutabilityPlan.instance(); } } } - // if the value is converted, see if the converter Class is annotated `@Immutable` + // if the value is converted, see if the converter Class is annotated `@Mutability` if ( converterDescriptor != null ) { final Mutability converterMutabilityAnn = converterDescriptor.getAttributeConverterClass().getAnnotation( Mutability.class ); if ( converterMutabilityAnn != null ) { - if ( buildingContext.getBuildingOptions().disallowExtensionsInCdi() ) { - return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( converterMutabilityAnn.value() ); - } - final ManagedBean> jtdBean = getManagedBeanRegistry().getBean( converterMutabilityAnn.value() ); - return jtdBean.getBeanInstance(); + final Class> mutability = converterMutabilityAnn.value(); + return resolveMutability( mutability ); } - if ( converterDescriptor.getAttributeConverterClass().isAnnotationPresent( Immutable.class ) ) { + final Immutable converterImmutableAnn = converterDescriptor.getAttributeConverterClass().getAnnotation( Immutable.class ); + if ( converterImmutableAnn != null ) { return ImmutableMutabilityPlan.instance(); } } + // if a custom UserType is specified, see if the UserType Class is annotated `@Mutability` final Class> customTypeImpl = Kind.ATTRIBUTE.mappingAccess.customType( attributeXProperty ); if ( customTypeImpl != null ) { - if ( customTypeImpl.isAnnotationPresent( 
Immutable.class ) ) { + final Mutability customTypeMutabilityAnn = customTypeImpl.getAnnotation( Mutability.class ); + if ( customTypeMutabilityAnn != null ) { + final Class> mutability = customTypeMutabilityAnn.value(); + return resolveMutability( mutability ); + } + + final Immutable customTypeImmutableAnn = customTypeImpl.getAnnotation( Immutable.class ); + if ( customTypeImmutableAnn != null ) { return ImmutableMutabilityPlan.instance(); } } @@ -999,6 +1033,23 @@ private void normalMutabilityDetails(XProperty attributeXProperty) { }; } + @SuppressWarnings({ "rawtypes", "unchecked" }) + private MutabilityPlan resolveMutability(Class mutability) { + if ( mutability.equals( Immutability.class ) ) { + return Immutability.instance(); + } + + if ( mutability.equals( ImmutableMutabilityPlan.class ) ) { + return ImmutableMutabilityPlan.instance(); + } + + if ( !buildingContext.getBuildingOptions().isAllowExtensionsInCdi() ) { + return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( mutability ); + } + + return getManagedBeanRegistry().getBean( mutability ).getBeanInstance(); + } + private void normalSupplementalDetails(XProperty attributeXProperty) { explicitJavaTypeAccess = typeConfiguration -> { @@ -1006,7 +1057,7 @@ private void normalSupplementalDetails(XProperty attributeXProperty) { if ( javaTypeAnn != null ) { final Class> javaTypeClass = normalizeJavaType( javaTypeAnn.value() ); if ( javaTypeClass != null ) { - if ( buildingContext.getBuildingOptions().disallowExtensionsInCdi() ) { + if ( !buildingContext.getBuildingOptions().isAllowExtensionsInCdi() ) { return FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( javaTypeClass ); } final ManagedBean> jtdBean = getManagedBeanRegistry().getBean( javaTypeClass ); @@ -1022,6 +1073,12 @@ private void normalSupplementalDetails(XProperty attributeXProperty) { return null; }; + final org.hibernate.annotations.JdbcTypeCode jdbcType = + findAnnotation( attributeXProperty, 
org.hibernate.annotations.JdbcTypeCode.class ); + if ( jdbcType != null ) { + jdbcTypeCode = jdbcType.value(); + } + normalJdbcTypeDetails( attributeXProperty); normalMutabilityDetails( attributeXProperty ); @@ -1067,10 +1124,6 @@ private static Class> normalizeJavaType(Class> normalizeMutability(Class> mutability) { - return mutability; - } - private java.lang.reflect.Type resolveJavaType(XClass returnedClassOrElement) { return buildingContext.getBootstrapContext() .getReflectionManager() @@ -1177,6 +1230,10 @@ public BasicValue make() { basicValue.setTemporalPrecision( temporalPrecision ); } + if ( jdbcTypeCode != null ) { + basicValue.setExplicitJdbcTypeCode( jdbcTypeCode ); + } + linkWithValue(); boolean isInSecondPass = buildingContext.getMetadataCollector().isInSecondPass(); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BinderHelper.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BinderHelper.java index a8728560650e..5f42db351d8c 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BinderHelper.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/BinderHelper.java @@ -13,8 +13,8 @@ import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -49,7 +49,6 @@ import org.hibernate.mapping.Column; import org.hibernate.mapping.Component; import org.hibernate.mapping.Join; -import org.hibernate.mapping.JoinedSubclass; import org.hibernate.mapping.MappedSuperclass; import org.hibernate.mapping.PersistentClass; import org.hibernate.mapping.Property; @@ -67,6 +66,7 @@ import jakarta.persistence.OneToOne; import static org.hibernate.boot.model.internal.AnnotatedColumn.buildColumnOrFormulaFromAnnotation; +import static org.hibernate.boot.model.internal.HCANNHelper.findAnnotation; import static 
org.hibernate.internal.util.StringHelper.isEmpty; import static org.hibernate.internal.util.StringHelper.isNotEmpty; import static org.hibernate.internal.util.StringHelper.qualify; @@ -437,16 +437,23 @@ else if ( columnOwner instanceof Join ) { final String name = collector.getPhysicalColumnName( referencedTable, joinColumn.getReferencedColumn() ); final Column column = new Column( name ); orderedColumns.add( column ); - columnsToProperty.put( column, new HashSet<>() ); + columnsToProperty.put( column, new LinkedHashSet<>() ); //need to use a LinkedHashSet here to make it deterministic } // Now, for each column find the properties of the target entity // which are mapped to that column. (There might be multiple such // properties for each column.) if ( columnOwner instanceof PersistentClass ) { - PersistentClass persistentClass = (PersistentClass) columnOwner; + final PersistentClass persistentClass = (PersistentClass) columnOwner; + // Process ToOne associations after Components, Basic and Id properties + final List toOneProperties = new ArrayList<>(); for ( Property property : persistentClass.getProperties() ) { - matchColumnsByProperty( property, columnsToProperty ); + if ( property.getValue() instanceof ToOne ) { + toOneProperties.add( property ); + } + else { + matchColumnsByProperty( property, columnsToProperty ); + } } if ( persistentClass.hasIdentifierProperty() ) { matchColumnsByProperty( persistentClass.getIdentifierProperty(), columnsToProperty ); @@ -458,6 +465,9 @@ else if ( columnOwner instanceof Join ) { matchColumnsByProperty( p, columnsToProperty ); } } + for ( Property property : toOneProperties ) { + matchColumnsByProperty( property, columnsToProperty ); + } } else { for ( Property property : ((Join) columnOwner).getProperties() ) { @@ -520,7 +530,6 @@ else if ( orderedProperties.contains( property ) ) { } else { // we have the first column of a new property - orderedProperties.add( property ); if ( property.getColumnSpan() > 1 ) { if ( 
!property.getColumns().get(0).equals( column ) ) { // the columns have to occur in the right order in the property @@ -531,6 +540,7 @@ else if ( orderedProperties.contains( property ) ) { currentProperty = property; lastPropertyColumnIndex = 1; } + orderedProperties.add( property ); } break; // we're only considering the first matching property for now } @@ -815,13 +825,13 @@ public static Any buildAnyValue( private static void processAnyDiscriminatorValues( XProperty property, Consumer consumer) { - final AnyDiscriminatorValue valueAnn = property.getAnnotation( AnyDiscriminatorValue.class ); + final AnyDiscriminatorValue valueAnn = findAnnotation( property, AnyDiscriminatorValue.class ); if ( valueAnn != null ) { consumer.accept( valueAnn ); return; } - final AnyDiscriminatorValues valuesAnn = property.getAnnotation( AnyDiscriminatorValues.class ); + final AnyDiscriminatorValues valuesAnn = findAnnotation( property, AnyDiscriminatorValues.class ); if ( valuesAnn != null ) { for ( AnyDiscriminatorValue discriminatorValue : valuesAnn.value() ) { consumer.accept( discriminatorValue ); @@ -878,7 +888,7 @@ static PropertyData getPropertyOverriddenByMapperOrMapsId( return metadataCollector.getPropertyAnnotatedWithMapsId( mappedClass, isId ? 
"" : propertyName ); } } - + public static Map toAliasTableMap(SqlFragmentAlias[] aliases){ final Map ret = new HashMap<>(); for ( SqlFragmentAlias alias : aliases ) { @@ -888,7 +898,7 @@ public static Map toAliasTableMap(SqlFragmentAlias[] aliases){ } return ret; } - + public static Map toAliasEntityMap(SqlFragmentAlias[] aliases){ final Map result = new HashMap<>(); for ( SqlFragmentAlias alias : aliases ) { @@ -1072,4 +1082,52 @@ static boolean isCompositeId(XClass entityClass, XProperty idProperty) { public static boolean isDefault(XClass clazz, MetadataBuildingContext context) { return context.getBootstrapContext().getReflectionManager().equals( clazz, void.class ); } + + public static void checkMappedByType( + String mappedBy, + Value targetValue, + String propertyName, + PropertyHolder propertyHolder, + Map persistentClasses) { + final ToOne toOne; + if ( targetValue instanceof Collection ) { + toOne = (ToOne) ( (Collection) targetValue ).getElement(); + } + else if ( targetValue instanceof ToOne ) { + toOne = (ToOne) targetValue; + } + else { + // Nothing to check, EARLY EXIT + return; + } + final String referencedEntityName = toOne.getReferencedEntityName(); + final PersistentClass referencedClass = persistentClasses.get( referencedEntityName ); + PersistentClass ownerClass = propertyHolder.getPersistentClass(); + while ( ownerClass != null ) { + if ( checkReferencedClass( ownerClass, referencedClass ) ) { + return; + } + else { + ownerClass = ownerClass.getSuperPersistentClass(); + } + } + throw new AnnotationException( + "Association '" + qualify( propertyHolder.getPath(), propertyName ) + + "' is 'mappedBy' a property named '" + mappedBy + + "' which references the wrong entity type '" + referencedEntityName + + "', expected '" + propertyHolder.getEntityName() + "'" + ); + } + + private static boolean checkReferencedClass(PersistentClass ownerClass, PersistentClass referencedClass) { + while ( referencedClass != null ) { + // Allow different entity 
types as long as they map to the same table + if ( ownerClass.getTable() == referencedClass.getTable() ) { + return true; + } + referencedClass = referencedClass.getSuperPersistentClass(); + } + return false; + } + } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ClassPropertyHolder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ClassPropertyHolder.java index 703efb187699..da96b8a53950 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ClassPropertyHolder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ClassPropertyHolder.java @@ -6,11 +6,16 @@ */ package org.hibernate.boot.model.internal; +import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; +import java.util.List; import java.util.Map; +import java.util.function.Consumer; import org.hibernate.AssertionFailure; import org.hibernate.MappingException; +import org.hibernate.PropertyNotFoundException; import org.hibernate.annotations.common.reflection.XClass; import org.hibernate.annotations.common.reflection.XProperty; import org.hibernate.boot.spi.MetadataBuildingContext; @@ -226,7 +231,39 @@ public Join addJoin(JoinTable joinTableAnn, boolean noDelayInPkColumnCreation) { return join; } + /** + * Embeddable classes can be defined using generics. For this reason, we must check + * every property value and specially handle generic components by setting the property + * as generic, to later be able to resolve its concrete type, and creating a new component + * with correctly typed sub-properties for the metamodel. 
+ */ + public static void handleGenericComponentProperty(Property property, MetadataBuildingContext context) { + final Value value = property.getValue(); + if ( value instanceof Component ) { + final Component component = (Component) value; + if ( component.isGeneric() && context.getMetadataCollector() + .getGenericComponent( component.getComponentClass() ) == null ) { + // If we didn't already, register the generic component to use it later + // as the metamodel type for generic embeddable attributes + final Component copy = component.copy(); + copy.setGeneric( false ); + copy.getProperties().clear(); + for ( Property prop : component.getProperties() ) { + prepareActualProperty( + prop, + component.getComponentClass(), + true, + context, + copy::addProperty + ); + } + context.getMetadataCollector().registerGenericComponent( copy ); + } + } + } + private void addPropertyToPersistentClass(Property property, XClass declaringClass) { + handleGenericComponentProperty( property, getContext() ); if ( declaringClass != null ) { final InheritanceState inheritanceState = inheritanceStatePerClass.get( declaringClass ); if ( inheritanceState == null ) { @@ -249,46 +286,55 @@ private void addPropertyToPersistentClass(Property property, XClass declaringCla private void addPropertyToMappedSuperclass(Property prop, XClass declaringClass) { final Class type = getContext().getBootstrapContext().getReflectionManager().toClass( declaringClass ); - MappedSuperclass superclass = getContext().getMetadataCollector().getMappedSuperclass( type ); + final MappedSuperclass superclass = getContext().getMetadataCollector().getMappedSuperclass( type ); + prepareActualProperty( prop, type, true, getContext(), superclass::addDeclaredProperty ); + } + + static void prepareActualProperty( + Property prop, + Class type, + boolean allowCollections, + MetadataBuildingContext context, + Consumer propertyConsumer) { if ( type.getTypeParameters().length == 0 ) { - superclass.addDeclaredProperty( prop ); 
+ propertyConsumer.accept( prop ); } else { // If the type has type parameters, we have to look up the XClass and actual property again // because the given XClass has a TypeEnvironment based on the type variable assignments of a subclass // and that might result in a wrong property type being used for a property which uses a type variable - final XClass actualDeclaringClass = getContext().getBootstrapContext().getReflectionManager().toXClass( type ); - for ( XProperty declaredProperty : actualDeclaringClass.getDeclaredProperties( prop.getPropertyAccessorName() ) ) { + final XClass actualDeclaringClass = context.getBootstrapContext().getReflectionManager().toXClass( type ); + for ( XProperty declaredProperty : getDeclaredProperties( actualDeclaringClass, prop.getPropertyAccessorName() ) ) { if ( prop.getName().equals( declaredProperty.getName() ) ) { final PropertyData inferredData = new PropertyInferredData( actualDeclaringClass, declaredProperty, null, - getContext().getBootstrapContext().getReflectionManager() + context.getBootstrapContext().getReflectionManager() ); final Value originalValue = prop.getValue(); if ( originalValue instanceof SimpleValue ) { // Avoid copying when the property doesn't depend on a type variable - if ( inferredData.getTypeName().equals( getTypeName( originalValue ) ) ) { - superclass.addDeclaredProperty( prop ); + if ( inferredData.getTypeName().equals( getTypeName( prop ) ) ) { + propertyConsumer.accept( prop ); return; } } - if ( originalValue instanceof Component ) { - superclass.addDeclaredProperty( prop ); - return; - } // If the property depends on a type variable, we have to copy it and the Value final Property actualProperty = prop.copy(); + actualProperty.setGeneric( true ); actualProperty.setReturnedClassName( inferredData.getTypeName() ); final Value value = actualProperty.getValue().copy(); if ( value instanceof Collection ) { + if ( !allowCollections ) { + throw new AssertionFailure( "Collections are not allowed as 
identifier properties" ); + } final Collection collection = (Collection) value; // The owner is a MappedSuperclass which is not a PersistentClass, so set it to null // collection.setOwner( null ); collection.setRole( type.getName() + "." + prop.getName() ); // To copy the element and key values, we need to defer setting the type name until the CollectionBinder ran - getContext().getMetadataCollector().addSecondPass( + context.getMetadataCollector().addSecondPass( new SecondPass() { @Override public void doSecondPass(Map persistentClasses) throws MappingException { @@ -297,7 +343,9 @@ public void doSecondPass(Map persistentClasses) throws MappingException { setTypeName( element, inferredData.getProperty().getElementClass().getName() ); if ( initializedCollection instanceof IndexedCollection ) { final Value index = ( (IndexedCollection) initializedCollection ).getIndex().copy(); - setTypeName( index, inferredData.getProperty().getMapKey().getName() ); + if ( inferredData.getProperty().getMapKey() != null ) { + setTypeName( index, inferredData.getProperty().getMapKey().getName() ); + } ( (IndexedCollection) collection ).setIndex( index ); } collection.setElement( element ); @@ -308,28 +356,54 @@ public void doSecondPass(Map persistentClasses) throws MappingException { else { setTypeName( value, inferredData.getTypeName() ); } -// if ( value instanceof Component ) { -// Component component = ( (Component) value ); -// Iterator propertyIterator = component.getPropertyIterator(); -// while ( propertyIterator.hasNext() ) { -// Property property = propertyIterator.next(); -// try { -// property.getGetter( component.getComponentClass() ); -// } -// catch (PropertyNotFoundException e) { -// propertyIterator.remove(); -// } -// } -// } + if ( value instanceof Component ) { + final Component component = ( (Component) value ); + final Class componentClass = component.getComponentClass(); + if ( component.isGeneric() ) { + actualProperty.setValue( 
context.getMetadataCollector().getGenericComponent( componentClass ) ); + } + else { + if ( componentClass == Object.class ) { + // Object is not a valid component class, but that is what we get when using a type variable + component.getProperties().clear(); + } + else { + final Iterator propertyIterator = component.getPropertyIterator(); + while ( propertyIterator.hasNext() ) { + try { + propertyIterator.next().getGetter( componentClass ); + } + catch (PropertyNotFoundException e) { + propertyIterator.remove(); + } + } + } + } + } actualProperty.setValue( value ); - superclass.addDeclaredProperty( actualProperty ); + propertyConsumer.accept( actualProperty ); break; } } } } - static String getTypeName(Value value) { + private static List getDeclaredProperties(XClass declaringClass, String accessType) { + final List properties = new ArrayList<>(); + XClass superclass = declaringClass; + while ( superclass != null ) { + properties.addAll( superclass.getDeclaredProperties( accessType ) ); + superclass = superclass.getSuperclass(); + } + return properties; + } + + private static String getTypeName(Property property) { + final String typeName = getTypeName( property.getValue() ); + return typeName != null ? 
typeName : property.getReturnedClassName(); + } + + private static String getTypeName(Value value) { if ( value instanceof Component ) { final Component component = (Component) value; final String typeName = component.getTypeName(); @@ -341,7 +415,7 @@ static String getTypeName(Value value) { return ( (SimpleValue) value ).getTypeName(); } - static void setTypeName(Value value, String typeName) { + private static void setTypeName(Value value, String typeName) { if ( value instanceof ToOne ) { final ToOne toOne = (ToOne) value; toOne.setReferencedEntityName( typeName ); @@ -349,7 +423,10 @@ static void setTypeName(Value value, String typeName) { } else if ( value instanceof Component ) { final Component component = (Component) value; - component.setComponentClassName( typeName ); + // Avoid setting type name for generic components + if ( !component.isGeneric() ) { + component.setComponentClassName( typeName ); + } if ( component.getTypeName() != null ) { component.setTypeName( typeName ); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionBinder.java index 1cab0abd7eaa..810431ed2a32 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionBinder.java @@ -77,13 +77,13 @@ import org.hibernate.boot.BootLogging; import org.hibernate.boot.model.IdentifierGeneratorDefinition; import org.hibernate.boot.model.TypeDefinition; +import org.hibernate.boot.model.source.internal.hbm.ModelBinder; import org.hibernate.boot.spi.AccessType; import org.hibernate.boot.spi.InFlightMetadataCollector; import org.hibernate.boot.spi.InFlightMetadataCollector.CollectionTypeRegistrationDescriptor; import org.hibernate.boot.spi.MetadataBuildingContext; import org.hibernate.boot.spi.PropertyData; import org.hibernate.boot.spi.SecondPass; -import 
org.hibernate.engine.config.spi.ConfigurationService; import org.hibernate.engine.jdbc.env.spi.JdbcEnvironment; import org.hibernate.engine.spi.FilterDefinition; import org.hibernate.internal.CoreMessageLogger; @@ -153,6 +153,7 @@ import static org.hibernate.boot.model.internal.AnnotatedJoinColumns.buildJoinColumnsWithDefaultColumnSuffix; import static org.hibernate.boot.model.internal.AnnotatedJoinColumns.buildJoinTableJoinColumns; import static org.hibernate.boot.model.internal.BinderHelper.buildAnyValue; +import static org.hibernate.boot.model.internal.BinderHelper.checkMappedByType; import static org.hibernate.boot.model.internal.BinderHelper.createSyntheticPropertyReference; import static org.hibernate.boot.model.internal.BinderHelper.getCascadeStrategy; import static org.hibernate.boot.model.internal.BinderHelper.getFetchMode; @@ -165,14 +166,12 @@ import static org.hibernate.boot.model.internal.EmbeddableBinder.fillEmbeddable; import static org.hibernate.boot.model.internal.GeneratorBinder.buildGenerators; import static org.hibernate.boot.model.internal.PropertyHolderBuilder.buildPropertyHolder; -import static org.hibernate.cfg.AvailableSettings.USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS; import static org.hibernate.engine.spi.ExecuteUpdateResultCheckStyle.fromResultCheckStyle; import static org.hibernate.internal.util.StringHelper.getNonEmptyOrConjunctionIfBothNonEmpty; import static org.hibernate.internal.util.StringHelper.isEmpty; import static org.hibernate.internal.util.StringHelper.isNotEmpty; import static org.hibernate.internal.util.StringHelper.nullIfEmpty; import static org.hibernate.internal.util.StringHelper.qualify; -import static org.hibernate.internal.util.config.ConfigurationHelper.getBoolean; /** * Base class for stateful binders responsible for producing mapping model objects of type {@link Collection}. 
@@ -847,7 +846,7 @@ private static ManagedBean createCustomType( Map parameters, MetadataBuildingContext buildingContext) { final boolean hasParameters = CollectionHelper.isNotEmpty( parameters ); - if ( buildingContext.getBuildingOptions().disallowExtensionsInCdi() ) { + if ( !buildingContext.getBuildingOptions().isAllowExtensionsInCdi() ) { // if deferred container access is enabled, we locally create the user-type return MappingHelper.createLocalUserCollectionTypeBean( role, implementation, hasParameters, parameters ); } @@ -1277,6 +1276,7 @@ private void bindProperty() { binder.setProperty( property ); binder.setInsertable( insertable ); binder.setUpdatable( updatable ); + binder.setBuildingContext( buildingContext ); Property prop = binder.makeProperty(); //we don't care about the join stuffs because the column is on the association table. if ( !declaringClassSet ) { @@ -1541,8 +1541,7 @@ public void secondPass(Map persistentClasses) throws Ma * return true if it's a Fk, false if it's an association table */ protected boolean bindStarToManySecondPass(Map persistentClasses) { - final PersistentClass persistentClass = persistentClasses.get( getElementType().getName() ); - if ( noAssociationTable( persistentClass ) ) { + if ( noAssociationTable( persistentClasses ) ) { //this is a foreign key bindOneToManySecondPass( persistentClasses ); return true; @@ -1554,25 +1553,34 @@ protected boolean bindStarToManySecondPass(Map persiste } } - private boolean isReversePropertyInJoin(XClass elementType, PersistentClass persistentClass) { - if ( persistentClass != null && isUnownedCollection()) { + private boolean isReversePropertyInJoin( + XClass elementType, + PersistentClass persistentClass, + Map persistentClasses) { + if ( persistentClass != null && isUnownedCollection() ) { + final Property mappedByProperty; try { - return persistentClass.getJoinNumber( persistentClass.getRecursiveProperty( mappedBy ) ) != 0; + mappedByProperty = persistentClass.getRecursiveProperty( 
mappedBy ); } catch (MappingException e) { - throw new AnnotationException( "Collection '" + safeCollectionRole() - + "' is 'mappedBy' a property named '" + mappedBy - + "' which does not exist in the target entity '" + elementType.getName() + "'" ); + throw new AnnotationException( + "Collection '" + safeCollectionRole() + + "' is 'mappedBy' a property named '" + mappedBy + + "' which does not exist in the target entity '" + elementType.getName() + "'" + ); } + checkMappedByType( mappedBy, mappedByProperty.getValue(), propertyName, propertyHolder, persistentClasses ); + return persistentClass.getJoinNumber( mappedByProperty ) != 0; } else { return false; } } - private boolean noAssociationTable(PersistentClass persistentClass) { + private boolean noAssociationTable(Map persistentClasses) { + final PersistentClass persistentClass = persistentClasses.get( getElementType().getName() ); return persistentClass != null - && !isReversePropertyInJoin( getElementType(), persistentClass ) + && !isReversePropertyInJoin( getElementType(), persistentClass, persistentClasses ) && oneToMany && !isExplicitAssociationTable && ( implicitJoinColumn() || explicitForeignJoinColumn() ); @@ -1789,8 +1797,7 @@ private String getWhereOnCollectionClause() { private String getWhereOnClassClause() { if ( property.getElementClass() != null ) { final Where whereOnClass = getOverridableAnnotation( property.getElementClass(), Where.class, getBuildingContext() ); - return whereOnClass != null - && ( whereOnClass.applyInToManyFetch() || useEntityWhereClauseForCollections() ) + return whereOnClass != null && ModelBinder.useEntityWhereClauseForCollections( buildingContext ) ? 
whereOnClass.clause() : null; } @@ -1799,18 +1806,6 @@ private String getWhereOnClassClause() { } } - private boolean useEntityWhereClauseForCollections() { - return getBoolean( - USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS, - buildingContext - .getBuildingOptions() - .getServiceRegistry() - .getService( ConfigurationService.class ) - .getSettings(), - true - ); - } - private void addFilter(boolean hasAssociationTable, FilterJoinTable filter) { if ( hasAssociationTable ) { collection.addFilter( @@ -2131,15 +2126,17 @@ private void handleElementCollection(XClass elementType, String hqlOrderBy) { propertyHolder, buildingContext ); - holder.prepare( property ); final Class> compositeUserType = resolveCompositeUserType( property, elementClass, buildingContext ); - if ( classType == EMBEDDABLE || compositeUserType != null ) { + boolean isComposite = classType == EMBEDDABLE || compositeUserType != null; + holder.prepare( property, isComposite ); + + if ( isComposite ) { handleCompositeCollectionElement( hqlOrderBy, elementClass, holder, compositeUserType ); } else { - handleCollectionElement( elementType, hqlOrderBy, elementClass, holder ); + handleCollectionElement( elementType, hqlOrderBy, elementClass, holder ); } } @@ -2619,7 +2616,9 @@ public void bindManyToManyInverseForeignKey( AnnotatedJoinColumns joinColumns, SimpleValue value, boolean unique) { - if ( isUnownedCollection() ) { + // This method is also called for entity valued map keys, so we must consider + // the mappedBy of the join columns instead of the collection's one + if ( joinColumns.hasMappedBy() ) { bindUnownedManyToManyInverseForeignKey( targetEntity, joinColumns, value ); } else { diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionPropertyHolder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionPropertyHolder.java index 8e743d228dec..5a1bccefb7c2 100644 --- 
a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionPropertyHolder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionPropertyHolder.java @@ -78,6 +78,7 @@ public Collection getCollectionBinding() { private void buildAttributeConversionInfoMaps( XProperty collectionProperty, + boolean isComposite, Map elementAttributeConversionInfoMap, Map keyAttributeConversionInfoMap) { if ( collectionProperty == null ) { @@ -88,7 +89,13 @@ private void buildAttributeConversionInfoMaps( { final Convert convertAnnotation = collectionProperty.getAnnotation( Convert.class ); if ( convertAnnotation != null ) { - applyLocalConvert( convertAnnotation, collectionProperty, elementAttributeConversionInfoMap, keyAttributeConversionInfoMap ); + applyLocalConvert( + convertAnnotation, + collectionProperty, + isComposite, + elementAttributeConversionInfoMap, + keyAttributeConversionInfoMap + ); } } @@ -99,6 +106,7 @@ private void buildAttributeConversionInfoMaps( applyLocalConvert( convertAnnotation, collectionProperty, + isComposite, elementAttributeConversionInfoMap, keyAttributeConversionInfoMap ); @@ -110,14 +118,15 @@ private void buildAttributeConversionInfoMaps( private void applyLocalConvert( Convert convertAnnotation, XProperty collectionProperty, + boolean isComposite, Map elementAttributeConversionInfoMap, Map keyAttributeConversionInfoMap) { - // IMPL NOTE : the rules here are quite more lenient than what JPA says. For example, JPA says - // that @Convert on a Map always needs to specify attributeName of key/value (or prefixed with - // key./value. for embedded paths). However, we try to see if conversion of either is disabled - // for whatever reason. For example, if the Map is annotated with @Enumerated the elements cannot - // be converted so any @Convert likely meant the key, so we apply it to the key + // IMPL NOTE : the rules here are quite more lenient than what JPA says. 
For example, JPA says that @Convert + // on a Map of basic types should default to "value" but it should explicitly specify attributeName of "key" + // (or prefixed with "key." for embedded paths) to be applied on the key. However, we try to see if conversion + // of either is disabled for whatever reason. For example, if the Map is annotated with @Enumerated the + // elements cannot be converted so any @Convert likely meant the key, so we apply it to the key final AttributeConversionInfo info = new AttributeConversionInfo( convertAnnotation, collectionProperty ); if ( collection.isMap() ) { @@ -132,10 +141,15 @@ private void applyLocalConvert( if ( isEmpty( info.getAttributeName() ) ) { // the @Convert did not name an attribute... if ( canElementBeConverted && canKeyBeConverted ) { - throw new IllegalStateException( - "@Convert placed on Map attribute [" + collection.getRole() - + "] must define attributeName of 'key' or 'value'" - ); + if ( !isComposite ) { + // if element is of basic type default to "value" + elementAttributeConversionInfoMap.put( "", info ); + } + else { + throw new IllegalStateException( + "@Convert placed on Map attribute [" + collection.getRole() + + "] of non-basic types must define attributeName of 'key' or 'value'" ); + } } else if ( canKeyBeConverted ) { keyAttributeConversionInfoMap.put( "", info ); @@ -325,7 +339,7 @@ public String toString() { boolean prepared; - public void prepare(XProperty collectionProperty) { + public void prepare(XProperty collectionProperty, boolean isComposite) { // fugly if ( prepared ) { return; @@ -377,7 +391,12 @@ else if ( collectionProperty.isAnnotationPresent( CollectionType.class ) ) { // Is it valid to reference a collection attribute in a @Convert attached to the owner (entity) by path? 
// if so we should pass in 'clazzToProcess' also if ( canKeyBeConverted || canElementBeConverted ) { - buildAttributeConversionInfoMaps( collectionProperty, elementAttributeConversionInfoMap, keyAttributeConversionInfoMap ); + buildAttributeConversionInfoMaps( + collectionProperty, + isComposite, + elementAttributeConversionInfoMap, + keyAttributeConversionInfoMap + ); } } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EmbeddableBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EmbeddableBinder.java index 536483bd33fd..c210e793e3e4 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EmbeddableBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EmbeddableBinder.java @@ -38,6 +38,7 @@ import org.hibernate.usertype.CompositeUserType; import jakarta.persistence.Column; +import jakarta.persistence.Convert; import jakarta.persistence.Embeddable; import jakarta.persistence.Embedded; import jakarta.persistence.EmbeddedId; @@ -146,7 +147,7 @@ static PropertyBinder createCompositeBinder( static boolean isEmbedded(XProperty property, XClass returnedClass) { return property.isAnnotationPresent( Embedded.class ) || property.isAnnotationPresent( EmbeddedId.class ) - || returnedClass.isAnnotationPresent( Embeddable.class ); + || returnedClass.isAnnotationPresent( Embeddable.class ) && !property.isAnnotationPresent( Convert.class ); } private static Component bindEmbeddable( @@ -192,7 +193,7 @@ private static Component bindEmbeddable( entityBinder, isComponentEmbedded, isIdentifierMapper, - false, + context.getMetadataCollector().isInSecondPass(), customInstantiatorImpl, compositeUserTypeClass, annotatedColumns, @@ -297,7 +298,8 @@ static Component fillEmbeddable( compositeUserTypeClass, columns, context, - inheritanceStatePerClass + inheritanceStatePerClass, + false ); } @@ -315,7 +317,8 @@ static Component fillEmbeddable( Class> compositeUserTypeClass, AnnotatedColumns 
columns, MetadataBuildingContext context, - Map inheritanceStatePerClass) { + Map inheritanceStatePerClass, + boolean isIdClass) { // inSecondPass can only be used to apply right away the second pass of a composite-element // Because it's a value type, there is no bidirectional association, hence second pass // ordering does not matter @@ -357,7 +360,7 @@ static Component fillEmbeddable( final XClass annotatedClass = inferredData.getPropertyClass(); final List classElements = - collectClassElements( propertyAccessor, context, returnedClassOrElement, annotatedClass ); + collectClassElements( propertyAccessor, context, returnedClassOrElement, annotatedClass, isIdClass ); final List baseClassElements = collectBaseClassElements( baseInferredData, propertyAccessor, context, annotatedClass ); if ( baseClassElements != null @@ -402,7 +405,7 @@ static Component fillEmbeddable( private static CompositeUserType compositeUserType( Class> compositeUserTypeClass, MetadataBuildingContext context) { - if ( context.getBuildingOptions().disallowExtensionsInCdi() ) { + if ( !context.getBuildingOptions().isAllowExtensionsInCdi() ) { FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( compositeUserTypeClass ); } @@ -417,7 +420,8 @@ private static List collectClassElements( AccessType propertyAccessor, MetadataBuildingContext context, XClass returnedClassOrElement, - XClass annotatedClass) { + XClass annotatedClass, + boolean isIdClass) { final List classElements = new ArrayList<>(); //embeddable elements can have type defs final PropertyContainer container = @@ -425,7 +429,8 @@ private static List collectClassElements( addElementsOfClass( classElements, container, context); //add elements of the embeddable's mapped superclasses XClass superClass = annotatedClass.getSuperclass(); - while ( superClass != null && superClass.isAnnotationPresent( MappedSuperclass.class ) ) { + while ( superClass != null && ( superClass.isAnnotationPresent( MappedSuperclass.class ) + || ( isIdClass 
&& !Object.class.getName().equals( superClass.getName() ) ) ) ) { //FIXME: proper support of type variables incl var resolved at upper levels final PropertyContainer superContainer = new PropertyContainer( superClass, annotatedClass, propertyAccessor ); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EntityBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EntityBinder.java index 6c24c4c388fe..6279358bcfd5 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EntityBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/EntityBinder.java @@ -17,27 +17,6 @@ import java.util.Map; import java.util.Set; -import jakarta.persistence.Access; -import jakarta.persistence.AttributeOverride; -import jakarta.persistence.AttributeOverrides; -import jakarta.persistence.Cacheable; -import jakarta.persistence.ConstraintMode; -import jakarta.persistence.DiscriminatorColumn; -import jakarta.persistence.DiscriminatorValue; -import jakarta.persistence.Entity; -import jakarta.persistence.IdClass; -import jakarta.persistence.Inheritance; -import jakarta.persistence.InheritanceType; -import jakarta.persistence.JoinColumn; -import jakarta.persistence.JoinTable; -import jakarta.persistence.NamedEntityGraph; -import jakarta.persistence.NamedEntityGraphs; -import jakarta.persistence.PrimaryKeyJoinColumn; -import jakarta.persistence.PrimaryKeyJoinColumns; -import jakarta.persistence.SecondaryTable; -import jakarta.persistence.SecondaryTables; -import jakarta.persistence.SharedCacheMode; -import jakarta.persistence.UniqueConstraint; import org.hibernate.AnnotationException; import org.hibernate.AssertionFailure; import org.hibernate.MappingException; @@ -56,6 +35,7 @@ import org.hibernate.annotations.HQLSelect; import org.hibernate.annotations.Immutable; import org.hibernate.annotations.Loader; +import org.hibernate.annotations.Mutability; import org.hibernate.annotations.NaturalIdCache; import 
org.hibernate.annotations.OnDelete; import org.hibernate.annotations.OptimisticLockType; @@ -125,6 +105,28 @@ import org.jboss.logging.Logger; +import jakarta.persistence.Access; +import jakarta.persistence.AttributeOverride; +import jakarta.persistence.AttributeOverrides; +import jakarta.persistence.Cacheable; +import jakarta.persistence.ConstraintMode; +import jakarta.persistence.DiscriminatorColumn; +import jakarta.persistence.DiscriminatorValue; +import jakarta.persistence.Entity; +import jakarta.persistence.IdClass; +import jakarta.persistence.Inheritance; +import jakarta.persistence.InheritanceType; +import jakarta.persistence.JoinColumn; +import jakarta.persistence.JoinTable; +import jakarta.persistence.NamedEntityGraph; +import jakarta.persistence.NamedEntityGraphs; +import jakarta.persistence.PrimaryKeyJoinColumn; +import jakarta.persistence.PrimaryKeyJoinColumns; +import jakarta.persistence.SecondaryTable; +import jakarta.persistence.SecondaryTables; +import jakarta.persistence.SharedCacheMode; +import jakarta.persistence.UniqueConstraint; + import static org.hibernate.boot.model.internal.AnnotatedClassType.MAPPED_SUPERCLASS; import static org.hibernate.boot.model.internal.AnnotatedDiscriminatorColumn.buildDiscriminatorColumn; import static org.hibernate.boot.model.internal.AnnotatedJoinColumn.buildInheritanceJoinColumn; @@ -132,10 +134,10 @@ import static org.hibernate.boot.model.internal.BinderHelper.getOverridableAnnotation; import static org.hibernate.boot.model.internal.BinderHelper.hasToOneAnnotation; import static org.hibernate.boot.model.internal.BinderHelper.isDefault; -import static org.hibernate.boot.model.internal.GeneratorBinder.makeIdGenerator; import static org.hibernate.boot.model.internal.BinderHelper.toAliasEntityMap; import static org.hibernate.boot.model.internal.BinderHelper.toAliasTableMap; import static org.hibernate.boot.model.internal.EmbeddableBinder.fillEmbeddable; +import static 
org.hibernate.boot.model.internal.GeneratorBinder.makeIdGenerator; import static org.hibernate.boot.model.internal.HCANNHelper.findContainingAnnotations; import static org.hibernate.boot.model.internal.InheritanceState.getInheritanceStateOfSuperEntity; import static org.hibernate.boot.model.internal.PropertyBinder.addElementsOfClass; @@ -401,7 +403,8 @@ private boolean mapAsIdClass( classWithIdClass, compositeClass, baseInferredData, - propertyAccessor + propertyAccessor, + true ); setIgnoreIdAnnotations( ignoreIdAnnotations ); for ( Property property : mapper.getProperties() ) { @@ -423,7 +426,8 @@ private Component createMapperProperty( XClass classWithIdClass, XClass compositeClass, PropertyData baseInferredData, - AccessType propertyAccessor) { + AccessType propertyAccessor, + boolean isIdClass) { final Component mapper = createMapper( inheritanceStates, persistentClass, @@ -432,7 +436,8 @@ private Component createMapperProperty( classWithIdClass, compositeClass, baseInferredData, - propertyAccessor + propertyAccessor, + isIdClass ); final Property mapperProperty = new Property(); mapperProperty.setName( NavigablePath.IDENTIFIER_MAPPER_PROPERTY ); @@ -452,7 +457,8 @@ private Component createMapper( XClass classWithIdClass, XClass compositeClass, PropertyData baseInferredData, - AccessType propertyAccessor) { + AccessType propertyAccessor, + boolean isIdClass) { final Component mapper = fillEmbeddable( propertyHolder, new PropertyPreloadedData( @@ -471,7 +477,8 @@ private Component createMapper( null, null, context, - inheritanceStates + inheritanceStates, + isIdClass ); persistentClass.setIdentifierMapper( mapper ); @@ -571,7 +578,8 @@ private void bindIdClass( null, null, buildingContext, - inheritanceStates + inheritanceStates, + true ); id.setKey( true ); if ( rootClass.getIdentifier() != null ) { @@ -1186,10 +1194,12 @@ public void bindEntity() { if ( persistentClass instanceof RootClass ) { bindRootEntity(); } - else if ( isMutable() ) { + else if ( 
!isMutable() ) { LOG.immutableAnnotationOnNonRoot( annotatedClass.getName() ); } + ensureNoMutabilityPlan(); + bindCustomPersister(); bindCustomSql(); bindSynchronize(); @@ -1200,6 +1210,12 @@ else if ( isMutable() ) { processNamedEntityGraphs(); } + private void ensureNoMutabilityPlan() { + if ( annotatedClass.isAnnotationPresent( Mutability.class ) ) { + throw new MappingException( "@Mutability is not allowed on entity" ); + } + } + private boolean isMutable() { return !annotatedClass.isAnnotationPresent(Immutable.class); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/MapBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/MapBinder.java index 20f3ce78c710..bd4fc1cf836d 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/MapBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/MapBinder.java @@ -37,6 +37,7 @@ import org.hibernate.mapping.Table; import org.hibernate.mapping.Value; import org.hibernate.resource.beans.spi.ManagedBean; +import org.hibernate.type.BasicType; import org.hibernate.usertype.CompositeUserType; import org.hibernate.usertype.UserCollectionType; @@ -278,7 +279,7 @@ private CollectionPropertyHolder buildCollectionPropertyHolder( // 'holder' is the CollectionPropertyHolder. 
// 'property' is the collection XProperty propertyHolder.startingProperty( property ); - holder.prepare(property); + holder.prepare( property, !( collection.getKey().getType() instanceof BasicType ) ); return holder; } @@ -506,7 +507,7 @@ private DependantBasicValue createDependantBasicValue(Table mapKeyTable, BasicVa private static void addSelectable(SimpleValue targetValue, Selectable selectable) { if ( selectable instanceof Column ) { - targetValue.addColumn( ( (Column) selectable).clone() ); + targetValue.addColumn( ( (Column) selectable).clone(), false, false ); } else if ( selectable instanceof Formula ) { targetValue.addFormula( new Formula( ( (Formula) selectable).getFormula() ) ); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/OneToOneSecondPass.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/OneToOneSecondPass.java index 232ec5ec974e..56c2abd5309f 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/OneToOneSecondPass.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/OneToOneSecondPass.java @@ -32,6 +32,7 @@ import jakarta.persistence.ForeignKey; +import static org.hibernate.boot.model.internal.BinderHelper.checkMappedByType; import static org.hibernate.boot.model.internal.BinderHelper.findPropertyByName; import static org.hibernate.boot.model.internal.BinderHelper.getPath; import static org.hibernate.boot.model.internal.ToOneBinder.bindForeignKeyNameAndDefinition; @@ -111,6 +112,7 @@ public void doSecondPass(Map persistentClasses) throws binder.setValue( value ); binder.setCascade( cascadeStrategy ); binder.setAccessType( inferredData.getDefaultAccess() ); + binder.setBuildingContext( buildingContext ); final LazyGroup lazyGroupAnnotation = property.getAnnotation( LazyGroup.class ); if ( lazyGroupAnnotation != null ) { @@ -152,6 +154,13 @@ else if ( targetProperty.getValue() instanceof ManyToOne ) { + "' of the target entity type '" + 
oneToOne.getReferencedEntityName() + "' which is not a '@OneToOne' or '@ManyToOne' association" ); } + checkMappedByType( + mappedBy, + targetProperty.getValue(), + oneToOne.getPropertyName(), + propertyHolder, + persistentClasses + ); } private void bindTargetManyToOne( @@ -178,6 +187,7 @@ private void bindTargetManyToOne( manyToOne.setFetchMode( oneToOne.getFetchMode() ); manyToOne.setLazy( oneToOne.isLazy() ); manyToOne.setReferencedEntityName( oneToOne.getReferencedEntityName() ); + manyToOne.setReferencedPropertyName( mappedBy ); manyToOne.setUnwrapProxy( oneToOne.isUnwrapProxy() ); manyToOne.markAsLogicalOneToOne(); property.setValue( manyToOne ); @@ -200,7 +210,7 @@ private void bindTargetManyToOne( final KeyValue targetEntityIdentifier = targetEntity.getIdentifier(); boolean referenceToPrimaryKey = mappedBy == null || targetEntityIdentifier instanceof Component - && !( (Component) targetEntityIdentifier ).hasProperty( mappedBy ); + && ( (Component) targetEntityIdentifier ).hasProperty( mappedBy ); oneToOne.setReferenceToPrimaryKey( referenceToPrimaryKey ); final String propertyRef = oneToOne.getReferencedPropertyName(); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/OptionalDeterminationSecondPass.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/OptionalDeterminationSecondPass.java new file mode 100644 index 000000000000..393f8544bfdb --- /dev/null +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/OptionalDeterminationSecondPass.java @@ -0,0 +1,13 @@ +/* + * Hibernate, Relational Persistence for Idiomatic Java + * + * License: GNU Lesser General Public License (LGPL), version 2.1 or later. + * See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html. 
+ */ +package org.hibernate.boot.model.internal; + + +import org.hibernate.boot.spi.SecondPass; + +public interface OptionalDeterminationSecondPass extends SecondPass { +} diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/PropertyBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/PropertyBinder.java index 2d418329e545..a73fbb07b032 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/PropertyBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/PropertyBinder.java @@ -8,7 +8,6 @@ import java.lang.annotation.Annotation; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; @@ -29,7 +28,6 @@ import org.hibernate.AnnotationException; import org.hibernate.AssertionFailure; import org.hibernate.MappingException; -import org.hibernate.PropertyNotFoundException; import org.hibernate.annotations.Any; import org.hibernate.annotations.AttributeBinderType; import org.hibernate.annotations.CompositeType; @@ -50,13 +48,16 @@ import org.hibernate.boot.spi.AccessType; import org.hibernate.boot.spi.MetadataBuildingContext; import org.hibernate.boot.spi.PropertyData; +import org.hibernate.boot.spi.SecondPass; import org.hibernate.engine.OptimisticLockStyle; import org.hibernate.internal.CoreMessageLogger; import org.hibernate.mapping.Collection; import org.hibernate.mapping.Component; import org.hibernate.mapping.GeneratorCreator; +import org.hibernate.mapping.Join; import org.hibernate.mapping.KeyValue; import org.hibernate.mapping.MappedSuperclass; +import org.hibernate.mapping.PersistentClass; import org.hibernate.mapping.Property; import org.hibernate.mapping.RootClass; import org.hibernate.mapping.SimpleValue; @@ -71,6 +72,8 @@ import static org.hibernate.boot.model.internal.AnyBinder.bindAny; import static org.hibernate.boot.model.internal.BinderHelper.isCompositeId; import static 
org.hibernate.boot.model.internal.BinderHelper.isGlobalGeneratorNameGlobal; +import static org.hibernate.boot.model.internal.ClassPropertyHolder.handleGenericComponentProperty; +import static org.hibernate.boot.model.internal.ClassPropertyHolder.prepareActualProperty; import static org.hibernate.boot.model.internal.CollectionBinder.bindCollection; import static org.hibernate.boot.model.internal.GeneratorBinder.createForeignGenerator; import static org.hibernate.boot.model.internal.GeneratorBinder.createIdGenerator; @@ -339,62 +342,13 @@ private Property bind(Property property) { } private void setDeclaredIdentifier(RootClass rootClass, MappedSuperclass superclass, Property prop) { + handleGenericComponentProperty( prop, buildingContext ); if ( superclass == null ) { rootClass.setDeclaredIdentifierProperty( prop ); return; } - // If the type has type parameters, we have to set the declared identifier property on the rootClass - // to be able to retrieve it with the correct type based on type variable assignment in the subclass final Class type = buildingContext.getBootstrapContext().getReflectionManager().toClass( declaringClass ); - if ( type.getTypeParameters().length == 0 ) { - superclass.setDeclaredIdentifierProperty( prop ); - } - else { - // If the type has type parameters, we have to look up the XClass and actual property again - // because the given XClass has a TypeEnvironment based on the type variable assignments of a subclass - // and that might result in a wrong property type being used for a property which uses a type variable - final XClass actualDeclaringClass = buildingContext.getBootstrapContext().getReflectionManager().toXClass( type ); - for ( XProperty declaredProperty : actualDeclaringClass.getDeclaredProperties( prop.getPropertyAccessorName() ) ) { - if ( prop.getName().equals( declaredProperty.getName() ) ) { - final PropertyData inferredData = new PropertyInferredData( - actualDeclaringClass, - declaredProperty, - null, - 
buildingContext.getBootstrapContext().getReflectionManager() - ); - final Value originalValue = prop.getValue(); - if ( originalValue instanceof SimpleValue ) { - // Avoid copying when the property doesn't depend on a type variable - if ( inferredData.getTypeName().equals( ClassPropertyHolder.getTypeName( originalValue ) ) ) { - superclass.setDeclaredIdentifierProperty( prop ); - return; - } - } - // If the property depends on a type variable, we have to copy it and the Value - final Property actualProperty = prop.copy(); - actualProperty.setReturnedClassName( inferredData.getTypeName() ); - final Value value = actualProperty.getValue().copy(); - assert !(value instanceof Collection); - ClassPropertyHolder.setTypeName( value, inferredData.getTypeName() ); - if ( value instanceof Component ) { - Component component = ( (Component) value ); - Iterator propertyIterator = component.getPropertyIterator(); - while ( propertyIterator.hasNext() ) { - Property property = propertyIterator.next(); - try { - property.getGetter( component.getComponentClass() ); - } - catch (PropertyNotFoundException e) { - propertyIterator.remove(); - } - } - } - actualProperty.setValue( value ); - superclass.setDeclaredIdentifierProperty( actualProperty ); - break; - } - } - } + prepareActualProperty( prop, type, false, buildingContext, superclass::setDeclaredIdentifierProperty ); } private Component getOrCreateCompositeId(RootClass rootClass) { @@ -497,7 +451,31 @@ private void handleMutability(Property property) { private void handleOptional(Property property) { if ( this.property != null ) { - property.setOptional( !isId && isOptional( this.property ) ); + property.setOptional( !isId && isOptional( this.property, this.holder ) ); + if ( property.isOptional() ) { + final OptionalDeterminationSecondPass secondPass = persistentClasses -> { + // Defer determining whether a property and its columns are nullable, + // as handleOptional might be called when the value is not yet fully initialized + 
if ( property.getPersistentClass() != null ) { + for ( Join join : property.getPersistentClass().getJoins() ) { + if ( join.getProperties().contains( property ) ) { + // If this property is part of a join it is inherently optional + return; + } + } + } + + if ( !property.getValue().isNullable() ) { + property.setOptional( false ); + } + }; + // Always register this as second pass and never execute it directly, + // even if we are in a second pass already. + // If we are in a second pass, then we are currently processing the generalSecondPassList + // to which the following call will add the second pass to, + // so it will be executed within that second pass, just a bit later + buildingContext.getMetadataCollector().addSecondPass( secondPass ); + } } } @@ -1222,10 +1200,12 @@ private static boolean isExplicitlyOptional(XProperty property) { * Should this property be considered optional, taking into * account whether it is primitive? */ - private static boolean isOptional(XProperty property) { + public static boolean isOptional(XProperty property, PropertyHolder propertyHolder) { return property.isAnnotationPresent( Basic.class ) ? 
property.getAnnotation( Basic.class ).optional() - : property.isArray() || !property.getClassOrElementClass().isPrimitive(); + : property.isArray() + || propertyHolder != null && propertyHolder.isComponent() + || !property.getClassOrElementClass().isPrimitive(); } private static boolean isLazy(XProperty property) { diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TableBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TableBinder.java index 7cf8feca4081..6177b609bfd1 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TableBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TableBinder.java @@ -57,7 +57,6 @@ * @author Emmanuel Bernard */ public class TableBinder { - //TODO move it to a getter/setter strategy private static final CoreMessageLogger LOG = Logger.getMessageLogger( CoreMessageLogger.class, TableBinder.class.getName() ); private MetadataBuildingContext buildingContext; @@ -550,6 +549,12 @@ else if ( firstColumn.isImplicit() ) { // if columns are implicit, then create the columns based // on the referenced entity id columns bindImplicitColumns( referencedEntity, joinColumns, value ); + if ( value instanceof ToOne ) { + // in the case of implicit foreign-keys, make sure the columns making up + // the foreign-key do not get resorted since the order is already properly + // ascertained from the referenced identifier + ( (ToOne) value ).setSorted( true ); + } } else { bindExplicitColumns( referencedEntity, joinColumns, value, buildingContext, associatedClass ); @@ -586,7 +591,6 @@ private static void bindImplicitPrimaryKeyReference( PersistentClass associatedClass) { //implicit case, we hope PK and FK columns are in the same order if ( joinColumns.getColumns().size() != referencedEntity.getIdentifier().getColumnSpan() ) { - // TODO: what about secondary tables?? associatedClass is null? 
throw new AnnotationException( "An association that targets entity '" + referencedEntity.getEntityName() + "' from entity '" + associatedClass.getEntityName() @@ -631,7 +635,16 @@ private static void bindPrimaryKeyReference( } } if ( value instanceof ToOne ) { - ( (ToOne) value).setSorted( true ); + ( (ToOne) value ).setSorted( true ); + } + else if ( value instanceof DependantValue ) { + ( (DependantValue) value ).setSorted( true ); + } + else { + throw new AssertionError( + "This should never happen, value can only be ToOne or DependantValue," + + "instead it's '" + value.getClass().getName() + "'" + ); } } @@ -660,7 +673,7 @@ private static boolean matchUpJoinColumnsWithKeyColumns( columns = referencedTable.getPrimaryKey().getColumns(); break; } - catch ( MappingException i ) { + catch (MappingException ignore) { } } if ( referencedColumn == null ) { @@ -723,14 +736,20 @@ private static void bindImplicitColumns( PersistentClass referencedEntity, AnnotatedJoinColumns joinColumns, SimpleValue value) { - final List idColumns = referencedEntity instanceof JoinedSubclass - ? referencedEntity.getKey().getColumns() - : referencedEntity.getIdentifier().getColumns(); + final KeyValue keyValue = referencedEntity instanceof JoinedSubclass + ? 
referencedEntity.getKey() + : referencedEntity.getIdentifier(); + final List idColumns = keyValue.getColumns(); for ( Column column: idColumns ) { final AnnotatedJoinColumn firstColumn = joinColumns.getJoinColumns().get(0); firstColumn.linkValueUsingDefaultColumnNaming( column, referencedEntity, value); firstColumn.overrideFromReferencedColumnIfNecessary( column ); } + if ( keyValue instanceof Component + && ( (Component) keyValue ).isSorted() + && value instanceof DependantValue ) { + ( (DependantValue) value ).setSorted( true ); + } } private static void bindUnownedAssociation( diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TimeZoneStorageHelper.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TimeZoneStorageHelper.java index d054d08161a6..89a1bc758939 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TimeZoneStorageHelper.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/TimeZoneStorageHelper.java @@ -13,9 +13,11 @@ import org.hibernate.boot.spi.MetadataBuildingContext; import org.hibernate.usertype.CompositeUserType; import org.hibernate.usertype.internal.OffsetDateTimeCompositeUserType; +import org.hibernate.usertype.internal.OffsetTimeCompositeUserType; import org.hibernate.usertype.internal.ZonedDateTimeCompositeUserType; import java.time.OffsetDateTime; +import java.time.OffsetTime; import java.time.ZonedDateTime; import static org.hibernate.TimeZoneStorageStrategy.COLUMN; @@ -23,6 +25,7 @@ public class TimeZoneStorageHelper { + private static final String OFFSET_TIME_CLASS = OffsetTime.class.getName(); private static final String OFFSET_DATETIME_CLASS = OffsetDateTime.class.getName(); private static final String ZONED_DATETIME_CLASS = ZonedDateTime.class.getName(); @@ -38,13 +41,29 @@ static Class> resolveTimeZoneStorageCompositeUser else if ( ZONED_DATETIME_CLASS.equals( returnedClassName ) ) { return ZonedDateTimeCompositeUserType.class; } + else if ( 
OFFSET_TIME_CLASS.equals( returnedClassName ) ) { + return OffsetTimeCompositeUserType.class; + } } return null; } - private static boolean isZonedDateTimeClass(String returnedClassName) { + private static boolean isTemporalWithTimeZoneClass(String returnedClassName) { return OFFSET_DATETIME_CLASS.equals( returnedClassName ) - || ZONED_DATETIME_CLASS.equals( returnedClassName ); + || ZONED_DATETIME_CLASS.equals( returnedClassName ) + || isOffsetTimeClass( returnedClassName ); + } + + public static boolean isOffsetTimeClass(XAnnotatedElement element) { + if ( element instanceof XProperty ) { + XProperty property = (XProperty) element; + return isOffsetTimeClass( property.getType().getName() ); + } + return false; + } + + private static boolean isOffsetTimeClass(String returnedClassName) { + return OFFSET_TIME_CLASS.equals( returnedClassName ); } static boolean useColumnForTimeZoneStorage(XAnnotatedElement element, MetadataBuildingContext context) { @@ -52,7 +71,7 @@ static boolean useColumnForTimeZoneStorage(XAnnotatedElement element, MetadataBu if ( timeZoneStorage == null ) { if ( element instanceof XProperty ) { XProperty property = (XProperty) element; - return isZonedDateTimeClass( property.getType().getName() ) + return isTemporalWithTimeZoneClass( property.getType().getName() ) //no @TimeZoneStorage annotation, so we need to use the default storage strategy && context.getBuildingOptions().getDefaultTimeZoneStorage() == COLUMN; } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ToOneBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ToOneBinder.java index 8cf18f554f43..a984e592e356 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ToOneBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/internal/ToOneBinder.java @@ -87,11 +87,18 @@ static void bindManyToOne( if ( property.isAnnotationPresent( Column.class ) || property.isAnnotationPresent( Columns.class ) ) { 
throw new AnnotationException( - "Property '"+ getPath( propertyHolder, inferredData ) + "Property '" + getPath( propertyHolder, inferredData ) + "' is a '@ManyToOne' association and may not use '@Column' to specify column mappings (use '@JoinColumn' instead)" ); } + if ( joinColumns.hasMappedBy() && isIdentifier( propertyHolder, propertyBinder, isIdentifierMapper ) ) { + throw new AnnotationException( + "Property '" + getPath( propertyHolder, inferredData ) + + "' is the inverse side of a '@ManyToOne' association and cannot be used as identifier" + ); + } + final Cascade hibernateCascade = property.getAnnotation( Cascade.class ); final NotFound notFound = property.getAnnotation( NotFound.class ); final NotFoundAction notFoundAction = notFound == null ? null : notFound.action(); @@ -121,6 +128,13 @@ static void bindManyToOne( ); } + private static boolean isIdentifier( + PropertyHolder propertyHolder, + PropertyBinder propertyBinder, + boolean isIdentifierMapper) { + return propertyBinder.isId() || propertyHolder.isOrWithinEmbeddedId() || propertyHolder.isInIdClass() || isIdentifierMapper; + } + private static boolean isMandatory(boolean optional, XProperty property, NotFoundAction notFoundAction) { // @MapsId means the columns belong to the pk; // A @MapsId association (obviously) must be non-null when the entity is first persisted. 
@@ -399,12 +413,19 @@ static void bindOneToOne( if ( property.isAnnotationPresent( Column.class ) || property.isAnnotationPresent( Columns.class ) ) { throw new AnnotationException( - "Property '"+ getPath( propertyHolder, inferredData ) + "Property '" + getPath( propertyHolder, inferredData ) + "' is a '@OneToOne' association and may not use '@Column' to specify column mappings" + " (use '@PrimaryKeyJoinColumn' instead)" ); } + if ( joinColumns.hasMappedBy() && isIdentifier( propertyHolder, propertyBinder, isIdentifierMapper ) ) { + throw new AnnotationException( + "Property '" + getPath( propertyHolder, inferredData ) + + "' is the inverse side of a '@OneToOne' association and cannot be used as identifier" + ); + } + //FIXME support a proper PKJCs final boolean trueOneToOne = property.isAnnotationPresent( PrimaryKeyJoinColumn.class ) || property.isAnnotationPresent( PrimaryKeyJoinColumns.class ); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/EnumeratedValueResolution.java b/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/EnumeratedValueResolution.java index 559ce0df04a0..39a1be399198 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/EnumeratedValueResolution.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/EnumeratedValueResolution.java @@ -6,66 +6,106 @@ */ package org.hibernate.boot.model.process.internal; +import java.util.Locale; + +import org.hibernate.boot.registry.classloading.spi.ClassLoaderService; +import org.hibernate.boot.spi.BootstrapContext; +import org.hibernate.boot.spi.MetadataBuildingContext; +import org.hibernate.internal.util.StringHelper; import org.hibernate.mapping.BasicValue; -import org.hibernate.metamodel.mapping.JdbcMapping; +import org.hibernate.service.ServiceRegistry; +import org.hibernate.type.ConvertedBasicType; +import org.hibernate.type.descriptor.converter.internal.NamedEnumValueConverter; +import 
org.hibernate.type.descriptor.converter.internal.OrdinalEnumValueConverter; import org.hibernate.type.descriptor.converter.spi.EnumValueConverter; -import org.hibernate.type.BasicType; -import org.hibernate.type.CustomType; +import org.hibernate.type.descriptor.java.EnumJavaType; import org.hibernate.type.descriptor.java.ImmutableMutabilityPlan; import org.hibernate.type.descriptor.java.JavaType; import org.hibernate.type.descriptor.java.MutabilityPlan; +import org.hibernate.type.descriptor.java.spi.JavaTypeRegistry; import org.hibernate.type.descriptor.jdbc.JdbcType; +import org.hibernate.type.descriptor.jdbc.JdbcTypeIndicators; +import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; +import org.hibernate.type.internal.ConvertedBasicTypeImpl; +import org.hibernate.type.spi.TypeConfiguration; + +import jakarta.persistence.EnumType; + +import static org.hibernate.type.SqlTypes.CHAR; +import static org.hibernate.type.SqlTypes.SMALLINT; +import static org.hibernate.type.SqlTypes.TINYINT; +import static org.hibernate.type.SqlTypes.VARCHAR; /** + * Resolution for {@linkplain Enum enum} mappings using {@link jakarta.persistence.Enumerated}, + * either implicitly or explicitly + * * @author Steve Ebersole */ -public class EnumeratedValueResolution> implements BasicValue.Resolution { - private final CustomType enumTypeMapping; - private final JavaType domainJtd; - private final JavaType jdbcJtd; - private final JdbcType jdbcType; - private final EnumValueConverter valueConverter; +public class EnumeratedValueResolution,R> implements BasicValue.Resolution { + public static final String PREFIX = "enum::"; + + private final EnumValueConverter valueConverter; + private final ConvertedBasicType jdbcMapping; public EnumeratedValueResolution( - CustomType enumTypeMapping, - JavaType domainJtd, - JavaType jdbcJtd, JdbcType jdbcType, - EnumValueConverter valueConverter) { - this.enumTypeMapping = enumTypeMapping; - this.domainJtd = domainJtd; - this.jdbcJtd = jdbcJtd; - 
this.jdbcType = jdbcType; + EnumValueConverter valueConverter, + MetadataBuildingContext context) { this.valueConverter = valueConverter; + + final String externalizableName = createName( valueConverter ); + this.jdbcMapping = new ConvertedBasicTypeImpl<>( externalizableName, jdbcType, valueConverter ); + + // todo (enum) : register database objects if needed + } + + private String createName(EnumValueConverter valueConverter) { + return String.format( + Locale.ROOT, + PREFIX + "%s::%s", + valueConverter.getDomainJavaType().getJavaType().getName(), + enumStyle( valueConverter ).name() + ); + } + + private static EnumType enumStyle(EnumValueConverter valueConverter) { + if ( valueConverter instanceof NamedEnumValueConverter ) { + return EnumType.STRING; + } + else if ( valueConverter instanceof OrdinalEnumValueConverter ) { + return EnumType.ORDINAL; + } + throw new UnsupportedOperationException(); } @Override - public JdbcMapping getJdbcMapping() { - return enumTypeMapping; + public ConvertedBasicType getJdbcMapping() { + return jdbcMapping; } @Override - public BasicType getLegacyResolvedBasicType() { - return enumTypeMapping; + public ConvertedBasicType getLegacyResolvedBasicType() { + return jdbcMapping; } @Override public JavaType getDomainJavaType() { - return domainJtd; + return jdbcMapping.getJavaTypeDescriptor(); } @Override public JavaType getRelationalJavaType() { - return jdbcJtd; + return jdbcMapping.getJdbcJavaType(); } @Override public JdbcType getJdbcType() { - return jdbcType; + return jdbcMapping.getJdbcType(); } @Override - public EnumValueConverter getValueConverter() { + public EnumValueConverter getValueConverter() { return valueConverter; } @@ -73,4 +113,60 @@ public EnumValueConverter getValueConverter() { public MutabilityPlan getMutabilityPlan() { return ImmutableMutabilityPlan.instance(); } + + public static > EnumeratedValueResolution fromName( + String name, + JdbcTypeIndicators jdbcTypeIndicators, + MetadataBuildingContext context) { + 
assert name != null; + assert name.startsWith( PREFIX ); + + final String[] parts = StringHelper.split( "::", name ); + assert parts.length == 3; + assert "enum".equals( parts[0] ); + + final TypeConfiguration typeConfiguration = context.getBootstrapContext().getTypeConfiguration(); + final JavaTypeRegistry javaTypeRegistry = typeConfiguration.getJavaTypeRegistry(); + final JdbcTypeRegistry jdbcTypeRegistry = typeConfiguration.getJdbcTypeRegistry(); + + final Class enumClass = resolveEnumClass( parts[1], context.getBootstrapContext() ); + final jakarta.persistence.EnumType style = jakarta.persistence.EnumType.valueOf( parts[ 2 ] ); + + //noinspection unchecked,rawtypes + final EnumJavaType enumJavaType = (EnumJavaType) javaTypeRegistry.getDescriptor( enumClass ); + final JdbcType jdbcType; + final EnumValueConverter converter; + + if ( style == EnumType.ORDINAL ) { + jdbcType = jdbcTypeRegistry.getDescriptor( enumJavaType.hasManyValues() ? SMALLINT : TINYINT ); + + final JavaType jdbcJavaType = jdbcType.getJdbcRecommendedJavaTypeMapping( + jdbcTypeIndicators.getColumnPrecision(), + jdbcTypeIndicators.getColumnScale(), + typeConfiguration + ); + converter = new OrdinalEnumValueConverter<>( enumJavaType, jdbcType, jdbcJavaType ); + } + else if ( style == EnumType.STRING ) { + jdbcType = jdbcTypeRegistry.getDescriptor( jdbcTypeIndicators.getColumnLength() == 1 ? 
CHAR : VARCHAR ); + final JavaType jdbcJavaType = jdbcType.getJdbcRecommendedJavaTypeMapping( + (int) jdbcTypeIndicators.getColumnLength(), + null, + typeConfiguration + ); + converter = new NamedEnumValueConverter<>( enumJavaType, jdbcType, jdbcJavaType ); + } + else { + throw new IllegalArgumentException( ); + } + + return new EnumeratedValueResolution<>( jdbcType, converter, context ); + } + + private static > Class resolveEnumClass(String enumClassName, BootstrapContext bootstrapContext) { + final ServiceRegistry serviceRegistry = bootstrapContext.getServiceRegistry(); + final ClassLoaderService classLoaderService = serviceRegistry.getService( ClassLoaderService.class ); + + return classLoaderService.classForName( enumClassName ); + } } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/InferredBasicValueResolver.java b/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/InferredBasicValueResolver.java index 6a0b2a3081f9..d9cdadcbf0d2 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/InferredBasicValueResolver.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/InferredBasicValueResolver.java @@ -8,29 +8,29 @@ import java.io.Serializable; import java.lang.reflect.Type; +import java.util.function.Function; import java.util.function.Supplier; -import jakarta.persistence.EnumType; -import jakarta.persistence.TemporalType; - import org.hibernate.MappingException; +import org.hibernate.boot.spi.MetadataBuildingContext; import org.hibernate.dialect.Dialect; import org.hibernate.mapping.BasicValue; import org.hibernate.mapping.Column; import org.hibernate.mapping.Selectable; import org.hibernate.mapping.Table; -import org.hibernate.type.descriptor.converter.internal.NamedEnumValueConverter; -import org.hibernate.type.descriptor.converter.internal.OrdinalEnumValueConverter; import org.hibernate.tool.schema.extract.spi.ColumnTypeInformation; import 
org.hibernate.type.AdjustableBasicType; +import org.hibernate.type.BasicPluralType; import org.hibernate.type.BasicType; -import org.hibernate.type.CustomType; import org.hibernate.type.SerializableType; -import org.hibernate.type.descriptor.java.BasicPluralJavaType; +import org.hibernate.type.descriptor.converter.internal.NamedEnumValueConverter; +import org.hibernate.type.descriptor.converter.internal.OrdinalEnumValueConverter; import org.hibernate.type.descriptor.java.BasicJavaType; +import org.hibernate.type.descriptor.java.BasicPluralJavaType; import org.hibernate.type.descriptor.java.EnumJavaType; -import org.hibernate.type.descriptor.java.ImmutableMutabilityPlan; import org.hibernate.type.descriptor.java.JavaType; +import org.hibernate.type.descriptor.java.JavaTypeHelper; +import org.hibernate.type.descriptor.java.MutabilityPlan; import org.hibernate.type.descriptor.java.SerializableJavaType; import org.hibernate.type.descriptor.java.TemporalJavaType; import org.hibernate.type.descriptor.jdbc.JdbcType; @@ -38,6 +38,9 @@ import org.hibernate.type.descriptor.jdbc.ObjectJdbcType; import org.hibernate.type.spi.TypeConfiguration; +import jakarta.persistence.EnumType; +import jakarta.persistence.TemporalType; + import static org.hibernate.type.SqlTypes.SMALLINT; import static org.hibernate.type.SqlTypes.TINYINT; @@ -52,13 +55,16 @@ public static BasicValue.Resolution from( JdbcType explicitJdbcType, Type resolvedJavaType, Supplier> reflectedJtdResolver, + Function explicitMutabilityPlanAccess, JdbcTypeIndicators stdIndicators, Table table, Selectable selectable, String ownerName, String propertyName, - Dialect dialect, - TypeConfiguration typeConfiguration) { + MetadataBuildingContext buildingContext) { + final Dialect dialect = buildingContext.getMetadataCollector().getDatabase().getDialect(); + final TypeConfiguration typeConfiguration = buildingContext.getBootstrapContext().getTypeConfiguration(); + final JavaType reflectedJtd = reflectedJtdResolver.get(); // 
NOTE : the distinction that is made below wrt `explicitJavaType` and `reflectedJtd` is @@ -75,15 +81,16 @@ public static BasicValue.Resolution from( null, explicitJdbcType, stdIndicators, - typeConfiguration + buildingContext ); } - else if ( explicitJavaType instanceof TemporalJavaType ) { + else if ( JavaTypeHelper.isTemporal( explicitJavaType ) ) { return fromTemporal( (TemporalJavaType) explicitJavaType, null, explicitJdbcType, resolvedJavaType, + explicitMutabilityPlanAccess, stdIndicators, typeConfiguration ); @@ -126,15 +133,16 @@ else if ( reflectedJtd != null ) { null, explicitJdbcType, stdIndicators, - typeConfiguration + buildingContext ); } - else if ( reflectedJtd instanceof TemporalJavaType ) { + else if ( JavaTypeHelper.isTemporal( reflectedJtd ) ) { return fromTemporal( (TemporalJavaType) reflectedJtd, null, explicitJdbcType, resolvedJavaType, + explicitMutabilityPlanAccess, stdIndicators, typeConfiguration ); @@ -156,21 +164,22 @@ else if ( explicitJdbcType != null ) { final JavaType elementJtd = containerJtd.getElementJavaType(); final BasicType registeredElementType; if ( elementJtd instanceof EnumJavaType ) { - final InferredBasicValueResolution resolution = InferredBasicValueResolver.fromEnum( - (EnumJavaType) elementJtd, + final EnumeratedValueResolution resolution = fromEnum( + (EnumJavaType) elementJtd, null, null, stdIndicators, - typeConfiguration + buildingContext ); - registeredElementType = resolution.getLegacyResolvedBasicType(); + registeredElementType = resolution.getJdbcMapping(); } - else if ( elementJtd instanceof TemporalJavaType ) { + else if ( JavaTypeHelper.isTemporal( elementJtd ) ) { final InferredBasicValueResolution resolution = InferredBasicValueResolver.fromTemporal( (TemporalJavaType) elementJtd, null, null, resolvedJavaType, + explicitMutabilityPlanAccess, stdIndicators, typeConfiguration ); @@ -190,10 +199,11 @@ else if ( elementJtd instanceof TemporalJavaType ) { registeredType = registeredElementType == null ? 
null : containerJtd.resolveType( typeConfiguration, dialect, - registeredElementType, - columnTypeInformation + resolveSqlTypeIndicators( stdIndicators, registeredElementType, elementJtd ), + columnTypeInformation, + stdIndicators ); - if ( registeredType != null ) { + if ( registeredType instanceof BasicPluralType ) { typeConfiguration.getBasicTypeRegistry().register( registeredType ); } } @@ -296,7 +306,7 @@ else if ( column.getLength() != null ) { jdbcMapping.getJavaTypeDescriptor(), jdbcMapping.getJdbcType(), jdbcMapping, - null + determineMutabilityPlan( explicitMutabilityPlanAccess, jdbcMapping.getJavaTypeDescriptor(), typeConfiguration ) ); } @@ -314,60 +324,81 @@ public static BasicType resolveSqlTypeIndicators( } } - public static , N extends Number> InferredBasicValueResolution fromEnum( + public static , R> EnumeratedValueResolution fromEnum( EnumJavaType enumJavaType, - BasicJavaType explicitJavaType, + BasicJavaType explicitJavaType, JdbcType explicitJdbcType, JdbcTypeIndicators stdIndicators, - TypeConfiguration typeConfiguration) { - final EnumType enumStyle = stdIndicators.getEnumeratedType() != null - ? 
stdIndicators.getEnumeratedType() - : EnumType.ORDINAL; - - switch ( enumStyle ) { - case STRING: { - return stringEnumValueResolution( - enumJavaType, - explicitJavaType, - explicitJdbcType, - stdIndicators, - typeConfiguration - ); - } - case ORDINAL: { - return ordinalEnumValueResolution( - enumJavaType, - explicitJavaType, - explicitJdbcType, - typeConfiguration - ); - } - default: { - throw new MappingException( "Unknown enumeration-style (JPA EnumType) : " + enumStyle ); - } + MetadataBuildingContext context) { + final EnumType enumStyle = stdIndicators.getEnumeratedType(); + + if ( enumStyle == EnumType.STRING ) { + //noinspection unchecked + return (EnumeratedValueResolution) namedEnumValueResolution( + enumJavaType, + explicitJavaType, + explicitJdbcType, + stdIndicators, + context + ); + } + + if ( enumStyle == EnumType.ORDINAL ) { + //noinspection unchecked + return (EnumeratedValueResolution) ordinalEnumValueResolution( + enumJavaType, + (BasicJavaType)explicitJavaType, + explicitJdbcType, + context + ); } + + if ( enumStyle == null ) { + // NOTE : separate from the explicit ORDINAL check to facilitate + // handling native database enum types. 
In theory anyway - atm + // we cannot discern an implicit (default value) or explicit style + // due to HCANN and annotation handling for default values + + //noinspection unchecked + return (EnumeratedValueResolution) ordinalEnumValueResolution( + enumJavaType, + (BasicJavaType)explicitJavaType, + explicitJdbcType, + context + ); + } + + throw new MappingException( "Unknown enumeration-style (JPA EnumType) : " + enumStyle ); } - private static , N extends Number> InferredBasicValueResolution ordinalEnumValueResolution( + private static , N extends Number> EnumeratedValueResolution ordinalEnumValueResolution( EnumJavaType enumJavaType, - JavaType explicitJavaType, + BasicJavaType explicitJavaType, JdbcType explicitJdbcType, - TypeConfiguration typeConfiguration) { - return ordinalResolution( - enumJavaType, - ordinalJavaType( explicitJavaType, typeConfiguration ), - ordinalJdbcType( explicitJdbcType, enumJavaType, typeConfiguration ), - typeConfiguration + MetadataBuildingContext context) { + final JdbcType jdbcType = ordinalJdbcType( explicitJdbcType, enumJavaType, context ); + final JavaType relationalJavaType = ordinalJavaType( explicitJavaType, jdbcType, context ); + + return new EnumeratedValueResolution<>( + jdbcType, + new OrdinalEnumValueConverter<>( enumJavaType, jdbcType, relationalJavaType ), + context ); } - private static JdbcType ordinalJdbcType(JdbcType explicitJdbcType, EnumJavaType enumJavaType, TypeConfiguration typeConfiguration) { + private static JdbcType ordinalJdbcType( + JdbcType explicitJdbcType, + EnumJavaType enumJavaType, + MetadataBuildingContext context) { return explicitJdbcType != null ? explicitJdbcType - : typeConfiguration.getJdbcTypeRegistry().getDescriptor( enumJavaType.hasManyValues() ? SMALLINT : TINYINT ); + : context.getMetadataCollector().getTypeConfiguration().getJdbcTypeRegistry().getDescriptor( enumJavaType.hasManyValues() ? 
SMALLINT : TINYINT ); } - private static JavaType ordinalJavaType(JavaType explicitJavaType, TypeConfiguration typeConfiguration) { + private static JavaType ordinalJavaType( + JavaType explicitJavaType, + JdbcType jdbcType, + MetadataBuildingContext context) { if ( explicitJavaType != null ) { if ( !Integer.class.isAssignableFrom( explicitJavaType.getJavaTypeClass() ) ) { throw new MappingException( @@ -379,73 +410,29 @@ private static JavaType ordinalJavaType(JavaType explic return explicitJavaType; } else { - return typeConfiguration.getJavaTypeRegistry().getDescriptor( Integer.class ); + return jdbcType.getJdbcRecommendedJavaTypeMapping( + null, + null, + context.getMetadataCollector().getTypeConfiguration() + ); } } - private static , N extends Number> InferredBasicValueResolution ordinalResolution( - EnumJavaType enumJavaType, - JavaType relationalJtd, - JdbcType jdbcType, - TypeConfiguration typeConfiguration - ) { - final CustomType customType = new CustomType<>( - new org.hibernate.type.EnumType<>( - enumJavaType.getJavaTypeClass(), - new OrdinalEnumValueConverter<>( enumJavaType, jdbcType, relationalJtd ), - typeConfiguration - ), - typeConfiguration - ); - return new InferredBasicValueResolution<>( - customType, - enumJavaType, - relationalJtd, - jdbcType, - customType, - ImmutableMutabilityPlan.instance() - ); - } - - private static > InferredBasicValueResolution stringEnumValueResolution( + private static > EnumeratedValueResolution namedEnumValueResolution( EnumJavaType enumJavaType, BasicJavaType explicitJavaType, JdbcType explicitJdbcType, JdbcTypeIndicators stdIndicators, - TypeConfiguration typeConfiguration) { - final JavaType relationalJtd = stringJavaType( explicitJavaType, stdIndicators, typeConfiguration ); - return stringResolution( - enumJavaType, - relationalJtd, - stringJdbcType( explicitJdbcType, stdIndicators, relationalJtd ), - typeConfiguration - ); - } + MetadataBuildingContext context) { + final JdbcType jdbcType = explicitJdbcType 
== null + ? enumJavaType.getRecommendedJdbcType( stdIndicators ) + : explicitJdbcType; + final JavaType relationalJtd = namedJavaType( explicitJavaType, stdIndicators, context ); - private static > InferredBasicValueResolution stringResolution( - EnumJavaType enumJavaType, - JavaType relationalJtd, - JdbcType jdbcType, - TypeConfiguration typeConfiguration) { - final CustomType customType = new CustomType<>( - new org.hibernate.type.EnumType<>( - enumJavaType.getJavaTypeClass(), - new NamedEnumValueConverter( - enumJavaType, - jdbcType, - relationalJtd - ), - typeConfiguration - ), - typeConfiguration - ); - return new InferredBasicValueResolution<>( - customType, - enumJavaType, - relationalJtd, + return new EnumeratedValueResolution<>( jdbcType, - customType, - ImmutableMutabilityPlan.instance() + new NamedEnumValueConverter<>( enumJavaType, jdbcType, relationalJtd ), + context ); } @@ -455,7 +442,10 @@ private static JdbcType stringJdbcType(JdbcType explicitJdbcType, JdbcTypeIndica : relationalJtd.getRecommendedJdbcType( stdIndicators ); } - private static JavaType stringJavaType(BasicJavaType explicitJavaType, JdbcTypeIndicators stdIndicators, TypeConfiguration typeConfiguration) { + private static JavaType namedJavaType( + BasicJavaType explicitJavaType, + JdbcTypeIndicators stdIndicators, + MetadataBuildingContext context) { if ( explicitJavaType != null ) { if ( ! String.class.isAssignableFrom( explicitJavaType.getJavaTypeClass() ) ) { throw new MappingException( @@ -464,10 +454,10 @@ private static JavaType stringJavaType(BasicJavaType explicitJavaType " should handle `java.lang.String` as its relational type descriptor" ); } - return (JavaType) explicitJavaType; + return (JavaType) explicitJavaType; } else { - return typeConfiguration.getJavaTypeRegistry() + return context.getMetadataCollector().getTypeConfiguration().getJavaTypeRegistry() .getDescriptor( stdIndicators.getColumnLength() == 1 ? 
Character.class : String.class ); } } @@ -477,6 +467,7 @@ public static InferredBasicValueResolution fromTemporal( BasicJavaType explicitJavaType, JdbcType explicitJdbcType, Type resolvedJavaType, + Function explicitMutabilityPlanAccess, JdbcTypeIndicators stdIndicators, TypeConfiguration typeConfiguration) { final TemporalType requestedTemporalPrecision = stdIndicators.getTemporalPrecision(); @@ -485,7 +476,7 @@ public static InferredBasicValueResolution fromTemporal( // Case #1 - explicit JavaType if ( explicitJavaType != null ) { - if ( !(explicitJavaType instanceof TemporalJavaType) ) { + if ( !JavaTypeHelper.isTemporal( explicitJavaType ) ) { throw new MappingException( "Explicit JavaType [" + explicitJavaType + "] defined for temporal value must implement TemporalJavaType" @@ -509,13 +500,14 @@ public static InferredBasicValueResolution fromTemporal( final BasicType jdbcMapping = typeConfiguration.getBasicTypeRegistry().resolve( explicitTemporalJtd, jdbcType ); + final MutabilityPlan mutabilityPlan = determineMutabilityPlan( explicitMutabilityPlanAccess, explicitTemporalJtd, typeConfiguration ); return new InferredBasicValueResolution<>( jdbcMapping, explicitTemporalJtd, explicitTemporalJtd, jdbcType, jdbcMapping, - explicitTemporalJtd.getMutabilityPlan() + mutabilityPlan ); } @@ -535,7 +527,8 @@ public static InferredBasicValueResolution fromTemporal( ); } else { - jtd = reflectedJtd; + // Avoid using the DateJavaType and prefer the JdbcTimestampJavaType + jtd = reflectedJtd.resolveTypeForPrecision( reflectedJtd.getPrecision(), typeConfiguration ); } final BasicType jdbcMapping = typeConfiguration.getBasicTypeRegistry().resolve( jtd, explicitJdbcType ); @@ -564,7 +557,8 @@ public static InferredBasicValueResolution fromTemporal( } else { basicType = typeConfiguration.getBasicTypeRegistry().resolve( - reflectedJtd, + // Avoid using the DateJavaType and prefer the JdbcTimestampJavaType + reflectedJtd.resolveTypeForPrecision( reflectedJtd.getPrecision(), 
typeConfiguration ), reflectedJtd.getRecommendedJdbcType( stdIndicators ) ); } @@ -575,8 +569,22 @@ public static InferredBasicValueResolution fromTemporal( basicType.getJavaTypeDescriptor(), basicType.getJdbcType(), basicType, - reflectedJtd.getMutabilityPlan() + determineMutabilityPlan( explicitMutabilityPlanAccess, reflectedJtd, typeConfiguration ) ); } + @SuppressWarnings({ "rawtypes", "unchecked" }) + private static MutabilityPlan determineMutabilityPlan( + Function explicitMutabilityPlanAccess, + JavaType jtd, + TypeConfiguration typeConfiguration) { + if ( explicitMutabilityPlanAccess != null ) { + final MutabilityPlan mutabilityPlan = explicitMutabilityPlanAccess.apply( typeConfiguration ); + if ( mutabilityPlan != null ) { + return mutabilityPlan; + } + } + return jtd.getMutabilityPlan(); + } + } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/NamedConverterResolution.java b/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/NamedConverterResolution.java index e423fe78fcba..3eff73136f24 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/NamedConverterResolution.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/process/internal/NamedConverterResolution.java @@ -6,8 +6,11 @@ */ package org.hibernate.boot.model.process.internal; +import java.util.Collection; +import java.util.Map; import java.util.function.Function; +import org.hibernate.annotations.Immutable; import org.hibernate.boot.model.convert.internal.ClassBasedConverterDescriptor; import org.hibernate.boot.model.convert.spi.ConverterDescriptor; import org.hibernate.boot.model.convert.spi.JpaAttributeConverterCreationContext; @@ -15,9 +18,9 @@ import org.hibernate.boot.spi.MetadataBuildingContext; import org.hibernate.mapping.BasicValue; import org.hibernate.metamodel.mapping.JdbcMapping; -import org.hibernate.type.descriptor.converter.spi.JpaAttributeConverter; import org.hibernate.type.BasicType; 
import org.hibernate.type.descriptor.converter.internal.AttributeConverterMutabilityPlanImpl; +import org.hibernate.type.descriptor.converter.spi.JpaAttributeConverter; import org.hibernate.type.descriptor.java.BasicJavaType; import org.hibernate.type.descriptor.java.ImmutableMutabilityPlan; import org.hibernate.type.descriptor.java.JavaType; @@ -112,23 +115,14 @@ private static NamedConverterResolution fromInternal( ? explicitJdbcType : relationalJtd.getRecommendedJdbcType( sqlTypeIndicators ); - final MutabilityPlan explicitMutabilityPlan = explicitMutabilityPlanAccess != null - ? explicitMutabilityPlanAccess.apply( typeConfiguration ) - : null; - - - final MutabilityPlan mutabilityPlan; - if ( explicitMutabilityPlan != null ) { - mutabilityPlan = explicitMutabilityPlan; - } - else if ( ! domainJtd.getMutabilityPlan().isMutable() ) { - mutabilityPlan = ImmutableMutabilityPlan.instance(); - } - else { - mutabilityPlan = new AttributeConverterMutabilityPlanImpl<>( converter, true ); - } + final MutabilityPlan mutabilityPlan = determineMutabilityPlan( + explicitMutabilityPlanAccess, + typeConfiguration, + converter, + domainJtd + ); - return new NamedConverterResolution( + return new NamedConverterResolution<>( domainJtd, relationalJtd, jdbcType, @@ -138,6 +132,38 @@ else if ( ! domainJtd.getMutabilityPlan().isMutable() ) { ); } + private static MutabilityPlan determineMutabilityPlan( + Function explicitMutabilityPlanAccess, + TypeConfiguration typeConfiguration, + JpaAttributeConverter converter, + JavaType domainJtd) { + //noinspection unchecked + final MutabilityPlan explicitMutabilityPlan = explicitMutabilityPlanAccess != null + ? 
explicitMutabilityPlanAccess.apply( typeConfiguration ) + : null; + if ( explicitMutabilityPlan != null ) { + return explicitMutabilityPlan; + } + + if ( converter.getConverterJavaType().getJavaTypeClass().isAnnotationPresent( Immutable.class ) ) { + return ImmutableMutabilityPlan.instance(); + } + + // if the domain JavaType is immutable, use the immutability plan + // - note : ignore this for collection-as-basic mappings. + if ( !domainJtd.getMutabilityPlan().isMutable() + && !isCollection( domainJtd.getJavaTypeClass() ) ) { + return ImmutableMutabilityPlan.instance(); + } + + return new AttributeConverterMutabilityPlanImpl<>( converter, true ); + } + + private static boolean isCollection(Class javaType) { + return Collection.class.isAssignableFrom( javaType ) + || Map.class.isAssignableFrom( javaType ); + } + private final JavaType domainJtd; private final JavaType relationalJtd; diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/process/spi/MetadataBuildingProcess.java b/hibernate-core/src/main/java/org/hibernate/boot/model/process/spi/MetadataBuildingProcess.java index 409d61704547..b145f1e722a5 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/process/spi/MetadataBuildingProcess.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/process/spi/MetadataBuildingProcess.java @@ -8,15 +8,19 @@ import java.io.InputStream; import java.sql.Types; +import java.time.Duration; import java.time.Instant; import java.time.OffsetDateTime; +import java.time.OffsetTime; import java.time.ZonedDateTime; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import org.hibernate.boot.MetadataSources; import org.hibernate.boot.internal.InFlightMetadataCollectorImpl; @@ -62,9 +66,13 @@ import org.hibernate.type.BasicType; import org.hibernate.type.BasicTypeRegistry; import 
org.hibernate.type.SqlTypes; +import org.hibernate.type.StandardBasicTypes; +import org.hibernate.type.WrapperArrayHandling; +import org.hibernate.type.descriptor.java.ByteArrayJavaType; +import org.hibernate.type.descriptor.java.CharacterArrayJavaType; import org.hibernate.type.descriptor.java.spi.JavaTypeRegistry; import org.hibernate.type.descriptor.jdbc.JdbcType; -import org.hibernate.type.descriptor.jdbc.JsonJdbcType; +import org.hibernate.type.descriptor.jdbc.JsonAsStringJdbcType; import org.hibernate.type.descriptor.jdbc.XmlAsStringJdbcType; import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; import org.hibernate.type.descriptor.sql.DdlType; @@ -602,14 +610,27 @@ public void contributeAttributeConverter(Class javaType, + String name, + String... additionalKeys) { + final JavaTypeRegistry javaTypeRegistry = typeConfiguration.getJavaTypeRegistry(); + final BasicTypeRegistry basicTypeRegistry = typeConfiguration.getBasicTypeRegistry(); + final BasicType basicType = new NamedBasicTypeImpl<>( + javaTypeRegistry.getDescriptor( javaType ), + jdbcTypeRegistry.getDescriptor( preferredSqlTypeCode ), + name + ); + final String[] keys = Arrays.copyOf( additionalKeys, additionalKeys.length + 2 ); + keys[additionalKeys.length] = javaType.getSimpleName(); + keys[additionalKeys.length + 1] = javaType.getName(); + basicTypeRegistry.register( basicType, keys ); + } + + private static void adaptTimeTypesToDefaultTimeZoneStorage( + TypeConfiguration typeConfiguration, + JdbcType timestampWithTimeZoneOverride) { final JavaTypeRegistry javaTypeRegistry = typeConfiguration.getJavaTypeRegistry(); final BasicTypeRegistry basicTypeRegistry = typeConfiguration.getBasicTypeRegistry(); - final BasicType instantType = new NamedBasicTypeImpl<>( - javaTypeRegistry.getDescriptor( Instant.class ), - jdbcTypeRegistry.getDescriptor( preferredSqlTypeCodeForInstant ), - "instant" + final BasicType offsetDateTimeType = new NamedBasicTypeImpl<>( + javaTypeRegistry.getDescriptor( 
OffsetTime.class ), + timestampWithTimeZoneOverride, + "OffsetTime" ); basicTypeRegistry.register( - instantType, - "org.hibernate.type.InstantType", - Instant.class.getSimpleName(), - Instant.class.getName() + offsetDateTimeType, + "org.hibernate.type.OffsetTimeType", + OffsetTime.class.getSimpleName(), + OffsetTime.class.getName() ); } - private static void adaptToDefaultTimeZoneStorage( + private static void adaptTimestampTypesToDefaultTimeZoneStorage( TypeConfiguration typeConfiguration, JdbcType timestampWithTimeZoneOverride) { final JavaTypeRegistry javaTypeRegistry = typeConfiguration.getJavaTypeRegistry(); @@ -771,6 +814,19 @@ private static void adaptToDefaultTimeZoneStorage( ); } + private static JdbcType getTimeWithTimeZoneOverride(MetadataBuildingOptions options, JdbcTypeRegistry jdbcTypeRegistry) { + switch ( options.getDefaultTimeZoneStorage() ) { + case NORMALIZE: + // For NORMALIZE, we replace the standard types that use TIME_WITH_TIMEZONE to use TIME + return jdbcTypeRegistry.getDescriptor( Types.TIME ); + case NORMALIZE_UTC: + // For NORMALIZE_UTC, we replace the standard types that use TIME_WITH_TIMEZONE to use TIME_UTC + return jdbcTypeRegistry.getDescriptor( SqlTypes.TIME_UTC ); + default: + return null; + } + } + private static JdbcType getTimestampWithTimeZoneOverride(MetadataBuildingOptions options, JdbcTypeRegistry jdbcTypeRegistry) { switch ( options.getDefaultTimeZoneStorage() ) { case NORMALIZE: diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/relational/ColumnOrderingStrategyStandard.java b/hibernate-core/src/main/java/org/hibernate/boot/model/relational/ColumnOrderingStrategyStandard.java index b03508ec3664..e85f5be4dbb7 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/relational/ColumnOrderingStrategyStandard.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/relational/ColumnOrderingStrategyStandard.java @@ -195,6 +195,7 @@ protected static int physicalSizeInBytes(int sqlTypeCode, 
Size columnSize, Metad return (int) length; case DATE: case TIME: + case TIME_UTC: case TIME_WITH_TIMEZONE: return 4; case TIMESTAMP: diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/relational/QualifiedNameParser.java b/hibernate-core/src/main/java/org/hibernate/boot/model/relational/QualifiedNameParser.java index 9aabf934ff13..62d8ad160bbb 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/relational/QualifiedNameParser.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/relational/QualifiedNameParser.java @@ -11,6 +11,7 @@ import org.hibernate.HibernateException; import org.hibernate.boot.model.naming.Identifier; import org.hibernate.boot.model.naming.IllegalIdentifierException; +import org.hibernate.internal.util.StringHelper; /** * Parses a qualified name. @@ -114,6 +115,17 @@ public NameParts parse(String text, Identifier defaultCatalog, Identifier defaul throw new IllegalIdentifierException( "Object name to parse must be specified, but found null" ); } + final int quoteCharCount = StringHelper.count( text, "`" ); + final boolean wasQuotedInEntirety = quoteCharCount == 2 && text.startsWith( "`" ) && text.endsWith( "`" ); + + if ( wasQuotedInEntirety ) { + return new NameParts( + defaultCatalog, + defaultSchema, + Identifier.toIdentifier( unquote( text ), true ) + ); + } + String catalogName = null; String schemaName = null; String name; @@ -122,15 +134,6 @@ public NameParts parse(String text, Identifier defaultCatalog, Identifier defaul boolean schemaWasQuoted = false; boolean nameWasQuoted; - // Note that we try to handle both forms of quoting, - // 1) where the entire string was quoted - // 2) where one or more individual parts were quoted - - boolean wasQuotedInEntirety = text.startsWith( "`" ) && text.endsWith( "`" ); - if ( wasQuotedInEntirety ) { - text = unquote( text ); - } - final String[] tokens = text.split( "\\." ); if ( tokens.length == 0 || tokens.length == 1 ) { // we have just a local name... 
diff --git a/hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/ModelBinder.java b/hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/ModelBinder.java index 5cf652a4cba0..d65225f6b991 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/ModelBinder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/model/source/internal/hbm/ModelBinder.java @@ -17,6 +17,7 @@ import org.hibernate.AssertionFailure; import org.hibernate.FetchMode; +import org.hibernate.Remove; import org.hibernate.annotations.SourceType; import org.hibernate.boot.MappingException; import org.hibernate.boot.jaxb.Origin; @@ -160,6 +161,7 @@ import org.hibernate.usertype.ParameterizedType; import org.hibernate.usertype.UserType; +import static org.hibernate.cfg.AvailableSettings.USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS; import static org.hibernate.internal.util.collections.CollectionHelper.isEmpty; import static org.hibernate.mapping.SimpleValue.DEFAULT_ID_GEN_STRATEGY; @@ -196,6 +198,28 @@ protected MetadataBuildingContext getBuildingContext() { this.relationalObjectBinder = new RelationalObjectBinder( context ); } + /** + * @deprecated Interprets the setting {@value AvailableSettings#USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS}, + * which itself is deprecated + */ + @SuppressWarnings("removal") + @Remove + @Deprecated( since = "6.2" ) + public static boolean useEntityWhereClauseForCollections(MetadataBuildingContext buildingContext) { + final Object explicitSetting = buildingContext + .getBuildingOptions() + .getServiceRegistry() + .getService( ConfigurationService.class ) + .getSettings() + .get( USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS ); + if ( explicitSetting != null ) { + DeprecationLogger.DEPRECATION_LOGGER.deprecatedSettingNoReplacement( USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS ); + return ConfigurationHelper.toBoolean( explicitSetting, true ); + } + + return true; + } + public void 
bindEntityHierarchy(EntityHierarchySourceImpl hierarchySource) { final RootClass rootEntityDescriptor = new RootClass( hierarchySource.getRootEntityMappingDocument() ); bindRootEntity( hierarchySource, rootEntityDescriptor ); @@ -992,7 +1016,7 @@ private void bindEntityVersion( } if ( versionAttributeSource.getSource().equals("db") ) { property.setValueGeneratorCreator( - context -> new SourceGeneration( SourceType.DB, property.getType().getReturnedClass() ) ); + context -> new SourceGeneration( SourceType.DB, property.getType().getReturnedClass(), context ) ); } rootEntityDescriptor.setVersion( property ); @@ -2608,7 +2632,7 @@ else if ( isVirtual ) { if ( CompositeUserType.class.isAssignableFrom( componentClass ) ) { componentBinding.setTypeName( explicitComponentClassName ); CompositeUserType compositeUserType; - if ( sourceDocument.getBuildingOptions().disallowExtensionsInCdi() ) { + if ( !sourceDocument.getBuildingOptions().isAllowExtensionsInCdi() ) { compositeUserType = (CompositeUserType) FallbackBeanInstanceProducer.INSTANCE.produceBeanInstance( componentClass ); } else { @@ -3122,17 +3146,17 @@ public void doSecondPass(Map persistentClasses) throws collectionBinding.createAllKeys(); if ( log.isDebugEnabled() ) { - log.debugf( "Mapped collection : " + getPluralAttributeSource().getAttributeRole().getFullPath() ); - log.debugf( " + table -> " + getCollectionBinding().getTable().getName() ); - log.debugf( " + key -> " + columns( getCollectionBinding().getKey() ) ); + log.debugf( "Mapped collection : %s", getPluralAttributeSource().getAttributeRole().getFullPath() ); + log.debugf( " + table -> %s", getCollectionBinding().getTable().getName() ); + log.debugf( " + key -> %s", columns( getCollectionBinding().getKey() ) ); if ( getCollectionBinding().isIndexed() ) { - log.debugf( " + index -> " + columns( ( (IndexedCollection) getCollectionBinding() ).getIndex() ) ); + log.debugf( " + index -> %s", columns( ( (IndexedCollection) getCollectionBinding() 
).getIndex() ) ); } if ( getCollectionBinding().isOneToMany() ) { - log.debugf( " + one-to-many -> " + ( (OneToMany) getCollectionBinding().getElement() ).getReferencedEntityName() ); + log.debugf( " + one-to-many -> %s", ( (OneToMany) getCollectionBinding().getElement() ).getReferencedEntityName() ); } else { - log.debugf( " + element -> " + columns( getCollectionBinding().getElement() ) ); + log.debugf( " + element -> %s", columns( getCollectionBinding().getElement() ) ); } } } @@ -3420,7 +3444,7 @@ else if ( getPluralAttributeSource().getElementSource() instanceof PluralAttribu final PersistentClass referencedEntityBinding = getReferencedEntityBinding( elementSource.getReferencedEntityName() ); - if ( useEntityWhereClauseForCollections() ) { + if ( useEntityWhereClauseForCollections( metadataBuildingContext ) ) { // For a one-to-many association, there are 2 possible sources of "where" clauses that apply // to the associated entity table: // 1) from the associated entity mapping; i.e., @@ -3490,7 +3514,7 @@ else if ( getPluralAttributeSource().getElementSource() instanceof PluralAttribu // This "where" clause comes from the collection mapping; e.g., getCollectionBinding().setWhere( getPluralAttributeSource().getWhere() ); - if ( useEntityWhereClauseForCollections() ) { + if ( useEntityWhereClauseForCollections( metadataBuildingContext ) ) { // For a many-to-many association, there are 2 possible sources of "where" clauses that apply // to the associated entity table (not the join table): // 1) from the associated entity mapping; i.e., @@ -3608,18 +3632,6 @@ private PersistentClass getReferencedEntityBinding(String referencedEntityName) } } - private boolean useEntityWhereClauseForCollections() { - return ConfigurationHelper.getBoolean( - AvailableSettings.USE_ENTITY_WHERE_CLAUSE_FOR_COLLECTIONS, - metadataBuildingContext - .getBuildingOptions() - .getServiceRegistry() - .getService( ConfigurationService.class ) - .getSettings(), - true - ); - } - private class 
PluralAttributeListSecondPass extends AbstractPluralAttributeSecondPass { public PluralAttributeListSecondPass( MappingDocument sourceDocument, diff --git a/hibernate-core/src/main/java/org/hibernate/boot/query/BootQueryLogging.java b/hibernate-core/src/main/java/org/hibernate/boot/query/BootQueryLogging.java index 990e7d3face1..9d46983f369b 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/query/BootQueryLogging.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/query/BootQueryLogging.java @@ -21,7 +21,4 @@ public interface BootQueryLogging { String NAME = BootLogging.NAME + ".query"; Logger BOOT_QUERY_LOGGER = Logger.getLogger( NAME ); - - boolean DEBUG_ENABLED = BOOT_QUERY_LOGGER.isDebugEnabled(); - boolean TRACE_ENABLED = BOOT_QUERY_LOGGER.isTraceEnabled(); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/query/HbmResultSetMappingDescriptor.java b/hibernate-core/src/main/java/org/hibernate/boot/query/HbmResultSetMappingDescriptor.java index 3f4f32e9fa8a..d5a8e9a845f4 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/query/HbmResultSetMappingDescriptor.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/query/HbmResultSetMappingDescriptor.java @@ -670,7 +670,7 @@ public FetchMemento resolve(ResultSetMappingResolutionContext resolutionContext) ) ); } - fetchable = (Fetchable) ( (FetchableContainer) fetchable ).findSubPart( propertyPathParts[i], null ); + fetchable = (Fetchable) ( (FetchableContainer) fetchable.getPartMappingType() ).findSubPart( propertyPathParts[i], null ); navigablePath = navigablePath.append( fetchable.getFetchableName() ); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/query/SqlResultSetMappingDescriptor.java b/hibernate-core/src/main/java/org/hibernate/boot/query/SqlResultSetMappingDescriptor.java index 519e8f1f780c..21bef1420394 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/query/SqlResultSetMappingDescriptor.java +++ 
b/hibernate-core/src/main/java/org/hibernate/boot/query/SqlResultSetMappingDescriptor.java @@ -276,11 +276,7 @@ private static FetchMementoBasic resolveDiscriminatorMemento( String discriminatorColumn, NavigablePath entityPath) { final EntityDiscriminatorMapping discriminatorMapping = entityMapping.getDiscriminatorMapping(); - if ( discriminatorMapping == null ) { - return null; - } - - if ( discriminatorColumn == null ) { + if ( discriminatorMapping == null || discriminatorColumn == null || !entityMapping.hasSubclasses() ) { return null; } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/internal/AggregatedClassLoader.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/internal/AggregatedClassLoader.java index 1ecf93ca4518..68dac1c025dd 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/internal/AggregatedClassLoader.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/internal/AggregatedClassLoader.java @@ -12,6 +12,8 @@ import java.util.Iterator; import java.util.LinkedHashSet; +import org.hibernate.internal.util.ExceptionHelper; + public class AggregatedClassLoader extends ClassLoader { private final ClassLoader[] individualClassLoaders; private final TcclLookupPrecedence tcclLookupPrecedence; @@ -196,18 +198,21 @@ protected URL findResource(String name) { @Override protected Class findClass(String name) throws ClassNotFoundException { final Iterator clIterator = newClassLoaderIterator(); + Throwable t = null; while ( clIterator.hasNext() ) { final ClassLoader classLoader = clIterator.next(); try { return classLoader.loadClass( name ); } - catch (Exception ignore) { + catch (Exception ex) { + ExceptionHelper.combine( ( t == null ? t = new Throwable() : t ), ex ); } - catch (LinkageError ignore) { + catch (LinkageError le) { + ExceptionHelper.combine( ( t == null ? 
t = new Throwable() : t ), le ); } } - throw new ClassNotFoundException( "Could not load requested class : " + name ); + throw new ClassNotFoundException( "Could not load requested class : " + name, ( t != null && t.getSuppressed().length > 0 ? t : null ) ); } private static ClassLoader locateSystemClassLoader() { diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/spi/ClassLoaderService.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/spi/ClassLoaderService.java index 2b6e9bcaf82c..b291f64319e4 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/spi/ClassLoaderService.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/classloading/spi/ClassLoaderService.java @@ -96,22 +96,18 @@ default Class classForTypeName(String className) { * @param The type of the service contract * * @return The ordered set of discovered services. + * + * @see org.hibernate.service.JavaServiceLoadable */ Collection loadJavaServices(Class serviceContract); T generateProxy(InvocationHandler handler, Class... interfaces); /** - * Loading a Package from the classloader. In case it's not found or an - * internal error (such as @see {@link LinkageError} occurs, we - * return null rather than throwing an exception. - * This is significantly different than loading a Class, as in all - * currently known usages, being unable to load the Package will - * only result in ignoring annotations on it - which is totally - * fine when the object doesn't exist. - * In case of other errors, implementations are expected to log - * a warning but it's still not treated as a fatal error. - * @return the matching Package, or null. + * Loading a Package from the ClassLoader. + * + * @return The Package. {@code null} if no such Package is found, or if the + * ClassLoader call leads to an exception ({@link LinkageError}, e.g.). 
*/ Package packageForNameOrNull(String packageName); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/StrategyRegistrationProvider.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/StrategyRegistrationProvider.java index f8903834f15b..a7b9dcd2397a 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/StrategyRegistrationProvider.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/StrategyRegistrationProvider.java @@ -6,6 +6,8 @@ */ package org.hibernate.boot.registry.selector; +import org.hibernate.service.JavaServiceLoadable; + /** * Responsible for providing the registrations of one or more strategy selectors. *

    @@ -20,6 +22,7 @@ * * @author Steve Ebersole */ +@JavaServiceLoadable public interface StrategyRegistrationProvider { /** * Get all {@link StrategyRegistration}s announced by this provider. diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/DefaultDialectSelector.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/DefaultDialectSelector.java index f7aa51fa9633..dd6ae8f9433c 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/DefaultDialectSelector.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/DefaultDialectSelector.java @@ -9,58 +9,35 @@ import java.util.Objects; import org.hibernate.boot.registry.selector.spi.DialectSelector; +import org.hibernate.boot.registry.selector.spi.StrategySelectionException; import org.hibernate.dialect.CockroachDialect; -import org.hibernate.dialect.DB2390Dialect; -import org.hibernate.dialect.DB2390V8Dialect; import org.hibernate.dialect.DB2400Dialect; import org.hibernate.dialect.DB2400V7R3Dialect; -import org.hibernate.dialect.DB297Dialect; import org.hibernate.dialect.DB2Dialect; import org.hibernate.dialect.DB2iDialect; import org.hibernate.dialect.DB2zDialect; import org.hibernate.dialect.DerbyDialect; -import org.hibernate.dialect.DerbyTenFiveDialect; -import org.hibernate.dialect.DerbyTenSevenDialect; -import org.hibernate.dialect.DerbyTenSixDialect; import org.hibernate.dialect.Dialect; import org.hibernate.dialect.H2Dialect; import org.hibernate.dialect.HANACloudColumnStoreDialect; import org.hibernate.dialect.HANAColumnStoreDialect; import org.hibernate.dialect.HANARowStoreDialect; import org.hibernate.dialect.HSQLDialect; -import org.hibernate.dialect.MariaDB102Dialect; import org.hibernate.dialect.MariaDB103Dialect; -import org.hibernate.dialect.MariaDB10Dialect; -import org.hibernate.dialect.MariaDB53Dialect; import org.hibernate.dialect.MariaDBDialect; -import 
org.hibernate.dialect.MySQL55Dialect; import org.hibernate.dialect.MySQL57Dialect; -import org.hibernate.dialect.MySQL5Dialect; import org.hibernate.dialect.MySQL8Dialect; import org.hibernate.dialect.MySQLDialect; -import org.hibernate.dialect.Oracle10gDialect; import org.hibernate.dialect.Oracle12cDialect; -import org.hibernate.dialect.Oracle8iDialect; -import org.hibernate.dialect.Oracle9iDialect; import org.hibernate.dialect.OracleDialect; -import org.hibernate.dialect.PostgreSQL81Dialect; -import org.hibernate.dialect.PostgreSQL82Dialect; -import org.hibernate.dialect.PostgreSQL91Dialect; -import org.hibernate.dialect.PostgreSQL92Dialect; -import org.hibernate.dialect.PostgreSQL93Dialect; -import org.hibernate.dialect.PostgreSQL94Dialect; -import org.hibernate.dialect.PostgreSQL95Dialect; -import org.hibernate.dialect.PostgreSQL9Dialect; +import org.hibernate.dialect.PostgreSQL10Dialect; import org.hibernate.dialect.PostgreSQLDialect; import org.hibernate.dialect.PostgresPlusDialect; -import org.hibernate.dialect.SQLServer2005Dialect; import org.hibernate.dialect.SQLServer2008Dialect; import org.hibernate.dialect.SQLServer2012Dialect; +import org.hibernate.dialect.SQLServer2016Dialect; import org.hibernate.dialect.SQLServerDialect; import org.hibernate.dialect.SpannerDialect; -import org.hibernate.dialect.Sybase11Dialect; -import org.hibernate.dialect.SybaseASE157Dialect; -import org.hibernate.dialect.SybaseASE15Dialect; import org.hibernate.dialect.SybaseASEDialect; import org.hibernate.dialect.SybaseDialect; @@ -82,11 +59,10 @@ public Class resolve(final String name) { case "DB2z": return DB2zDialect.class; case "DB297": - return DB297Dialect.class; + return findCommunityDialect( name ); case "DB2390": - return DB2390Dialect.class; case "DB2390V8": - return DB2390V8Dialect.class; + return findCommunityDialect( name ); case "DB2400": return DB2400Dialect.class; case "DB2400V7R3": @@ -94,11 +70,9 @@ public Class resolve(final String name) { case "Derby": 
return DerbyDialect.class; case "DerbyTenFive": - return DerbyTenFiveDialect.class; case "DerbyTenSix": - return DerbyTenSixDialect.class; case "DerbyTenSeven": - return DerbyTenSevenDialect.class; + return findCommunityDialect( name ); case "H2": return H2Dialect.class; case "HANACloudColumnStore": @@ -112,19 +86,16 @@ public Class resolve(final String name) { case "MariaDB": return MariaDBDialect.class; case "MariaDB53": - return MariaDB53Dialect.class; case "MariaDB10": - return MariaDB10Dialect.class; case "MariaDB102": - return MariaDB102Dialect.class; + return findCommunityDialect( name ); case "MariaDB103": return MariaDB103Dialect.class; case "MySQL": return MySQLDialect.class; case "MySQL5": - return MySQL5Dialect.class; case "MySQL55": - return MySQL55Dialect.class; + return findCommunityDialect( name ); case "MySQL57": return MySQL57Dialect.class; case "MySQL8": @@ -132,11 +103,9 @@ public Class resolve(final String name) { case "Oracle": return OracleDialect.class; case "Oracle8i": - return Oracle8iDialect.class; case "Oracle9i": - return Oracle9iDialect.class; case "Oracle10g": - return Oracle10gDialect.class; + return findCommunityDialect( name ); case "Oracle12c": return Oracle12cDialect.class; case "PostgresPlus": @@ -144,43 +113,58 @@ public Class resolve(final String name) { case "PostgreSQL": return PostgreSQLDialect.class; case "PostgreSQL81": - return PostgreSQL81Dialect.class; case "PostgreSQL82": - return PostgreSQL82Dialect.class; case "PostgreSQL9": - return PostgreSQL9Dialect.class; case "PostgreSQL91": - return PostgreSQL91Dialect.class; case "PostgreSQL92": - return PostgreSQL92Dialect.class; case "PostgreSQL93": - return PostgreSQL93Dialect.class; case "PostgreSQL94": - return PostgreSQL94Dialect.class; case "PostgreSQL95": - return PostgreSQL95Dialect.class; + return findCommunityDialect( name ); + case "PostgreSQL10": + return PostgreSQL10Dialect.class; case "Spanner": return SpannerDialect.class; case "SQLServer": return 
SQLServerDialect.class; case "SQLServer2005": - return SQLServer2005Dialect.class; + return findCommunityDialect( name ); case "SQLServer2008": return SQLServer2008Dialect.class; case "SQLServer2012": return SQLServer2012Dialect.class; + case "SQLServer2016": + return SQLServer2016Dialect.class; case "Sybase": return SybaseDialect.class; case "Sybase11": - return Sybase11Dialect.class; + return findCommunityDialect( name ); case "SybaseASE": return SybaseASEDialect.class; case "SybaseASE15": - return SybaseASE15Dialect.class; case "SybaseASE157": - return SybaseASE157Dialect.class; + return findCommunityDialect( name ); } return null; } + private static Class findCommunityDialect(String name) { + try { + //noinspection unchecked + return (Class) DefaultDialectSelector.class.getClassLoader().loadClass( + "org.hibernate.community.dialect." + name + "Dialect" + ); + } + catch (ClassNotFoundException e) { + throw new StrategySelectionException( + "Couldn't load the dialect class for the `hibernate.dialect` short name [" + name + "], " + + "because the application is missing a dependency on the hibernate-community-dialects module. " + + "Hibernate 6.2 dropped support for database versions that are unsupported by vendors " + + "and code for old versions was moved to the hibernate-community-dialects module. 
" + + "For further information, read https://in.relation.to/2023/02/15/hibernate-orm-62-db-version-support/", + e + ); + } + } + } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/StrategySelectorImpl.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/StrategySelectorImpl.java index f86b8077b325..dab4471b77c9 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/StrategySelectorImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/internal/StrategySelectorImpl.java @@ -24,7 +24,7 @@ import org.jboss.logging.Logger; /** - * Standard implementation of the StrategySelector contract. + * Standard implementation of the {@link StrategySelector} contract. * * @author Steve Ebersole */ diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/package-info.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/package-info.java index 01cd733d8208..382ce0b95dd1 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/package-info.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/package-info.java @@ -6,7 +6,7 @@ */ /** - * Defines a feature-set around named registration of implementations of various contracts and the ability - * to select those implementations. + * Defines a feature set around named registration of implementations of various contracts + * and the ability to select those implementations. 
*/ package org.hibernate.boot.registry.selector; diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/DialectSelector.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/DialectSelector.java index e83b56060766..e7100310fc2d 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/DialectSelector.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/DialectSelector.java @@ -8,10 +8,12 @@ import org.hibernate.boot.registry.selector.internal.LazyServiceResolver; import org.hibernate.dialect.Dialect; +import org.hibernate.service.JavaServiceLoadable; import org.hibernate.service.Service; /** * @author Christian Beikov */ +@JavaServiceLoadable public interface DialectSelector extends Service, LazyServiceResolver { } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/package-info.java b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/package-info.java index 587349b0095c..6de2377756a5 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/package-info.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/registry/selector/spi/package-info.java @@ -6,6 +6,9 @@ */ /** - * Defines actual contract used for strategy selection : {@link org.hibernate.boot.registry.selector.spi.StrategySelector}. + * Defines actual contract used for + * {@linkplain org.hibernate.boot.registry.selector.spi.StrategySelector strategy selection}. 
+ * + * @link org.hibernate.boot.registry.selector.spi.StrategySelector */ package org.hibernate.boot.registry.selector.spi; diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadata.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadata.java index de2ca65206c3..b9558c194d52 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadata.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadata.java @@ -244,6 +244,10 @@ public void visitRegisteredComponents(Consumer consumer) { delegate().visitRegisteredComponents( consumer ); } + @Override + public Component getGenericComponent(Class componentClass) { + return delegate().getGenericComponent( componentClass ); + } @Override public NamedObjectRepository buildNamedQueryRepository(SessionFactoryImplementor sessionFactory) { diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadataBuildingOptions.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadataBuildingOptions.java index 663e8d7a4ac4..f927b004fe2d 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadataBuildingOptions.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingMetadataBuildingOptions.java @@ -19,6 +19,7 @@ import org.hibernate.cfg.MetadataSourceType; import org.hibernate.dialect.TimeZoneSupport; import org.hibernate.id.factory.IdentifierGeneratorFactory; +import org.hibernate.type.WrapperArrayHandling; import org.hibernate.type.spi.TypeConfiguration; import jakarta.persistence.SharedCacheMode; @@ -67,6 +68,11 @@ public TimeZoneSupport getTimeZoneSupport() { return delegate.getTimeZoneSupport(); } + @Override + public WrapperArrayHandling getWrapperArrayHandling() { + return delegate.getWrapperArrayHandling(); + } + @Override public List getBasicTypeRegistrations() { return 
delegate.getBasicTypeRegistrations(); @@ -172,7 +178,7 @@ public boolean isXmlMappingEnabled() { } @Override - public boolean disallowExtensionsInCdi() { - return delegate.disallowExtensionsInCdi(); + public boolean isAllowExtensionsInCdi() { + return delegate.isAllowExtensionsInCdi(); } } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingSessionFactoryBuilder.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingSessionFactoryBuilder.java index df34075fba79..d6f0eb3a7f0b 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingSessionFactoryBuilder.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/AbstractDelegatingSessionFactoryBuilder.java @@ -23,6 +23,7 @@ import org.hibernate.query.sqm.function.SqmFunctionDescriptor; import org.hibernate.resource.jdbc.spi.PhysicalConnectionHandlingMode; import org.hibernate.resource.jdbc.spi.StatementInspector; +import org.hibernate.type.format.FormatMapper; /** * Convenience base class for custom implementors of SessionFactoryBuilder, using delegation @@ -390,6 +391,18 @@ public T applyConnectionHandlingMode(PhysicalConnectionHandlingMode connectionHa return getThis(); } + @Override + public SessionFactoryBuilder applyJsonFormatMapper(FormatMapper jsonFormatMapper) { + delegate.applyJsonFormatMapper( jsonFormatMapper ); + return getThis(); + } + + @Override + public SessionFactoryBuilder applyXmlFormatMapper(FormatMapper xmlFormatMapper) { + delegate.applyXmlFormatMapper( xmlFormatMapper ); + return getThis(); + } + @Override public SessionFactory build() { return delegate.build(); diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/AdditionalMappingContributor.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/AdditionalMappingContributor.java index dfdf6e976f2c..2efaef2d4542 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/AdditionalMappingContributor.java +++ 
b/hibernate-core/src/main/java/org/hibernate/boot/spi/AdditionalMappingContributor.java @@ -8,6 +8,7 @@ import org.hibernate.Incubating; import org.hibernate.boot.ResourceStreamLocator; +import org.hibernate.service.JavaServiceLoadable; /** * Contract allowing pluggable contributions of additional mapping objects. @@ -17,6 +18,7 @@ * @author Steve Ebersole */ @Incubating +@JavaServiceLoadable public interface AdditionalMappingContributor { /** * The name of this contributor. May be {@code null}. diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/BootstrapContext.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/BootstrapContext.java index d85a3ae5dd19..99830505767e 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/BootstrapContext.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/BootstrapContext.java @@ -27,7 +27,7 @@ import org.hibernate.query.sqm.function.SqmFunctionDescriptor; import org.hibernate.query.sqm.function.SqmFunctionRegistry; import org.hibernate.resource.beans.spi.BeanInstanceProducer; -import org.hibernate.type.internal.BasicTypeImpl; +import org.hibernate.type.BasicType; import org.hibernate.type.spi.TypeConfiguration; import org.jboss.jandex.IndexView; @@ -232,10 +232,10 @@ default IdentifierGeneratorFactory getIdentifierGeneratorFactory() { /** * To support Envers. */ - void registerAdHocBasicType(BasicTypeImpl basicType); + void registerAdHocBasicType(BasicType basicType); /** * To support Envers. 
*/ - BasicTypeImpl resolveAdHocBasicType(String key); + BasicType resolveAdHocBasicType(String key); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/InFlightMetadataCollector.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/InFlightMetadataCollector.java index 4aa89199f0c6..cbdc0d754199 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/InFlightMetadataCollector.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/InFlightMetadataCollector.java @@ -84,6 +84,8 @@ public interface InFlightMetadataCollector extends Mapping, MetadataImplementor void registerComponent(Component component); + void registerGenericComponent(Component component); + /** * Adds an import (for use in HQL). * diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderFactory.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderFactory.java index c233cf132d1e..368096e83467 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderFactory.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderFactory.java @@ -8,6 +8,7 @@ import org.hibernate.boot.Metadata; import org.hibernate.boot.MetadataSources; +import org.hibernate.service.JavaServiceLoadable; /** * An extension point for integrators that wish to hook into the process of how a {@link Metadata} is built. 
Intended as @@ -16,6 +17,7 @@ * * @author Gunnar Morling */ +@JavaServiceLoadable public interface MetadataBuilderFactory { /** diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderInitializer.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderInitializer.java index 9a3c983e12b1..69f329d040d2 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderInitializer.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuilderInitializer.java @@ -8,6 +8,7 @@ import org.hibernate.boot.MetadataBuilder; import org.hibernate.boot.registry.StandardServiceRegistry; +import org.hibernate.service.JavaServiceLoadable; /** * Contract for contributing to the initialization of {@link MetadataBuilder}. @@ -20,6 +21,7 @@ * * @since 5.0 */ +@JavaServiceLoadable public interface MetadataBuilderInitializer { void contribute(MetadataBuilder metadataBuilder, StandardServiceRegistry serviceRegistry); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuildingOptions.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuildingOptions.java index 4f03b5324532..5d68fba35e2a 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuildingOptions.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataBuildingOptions.java @@ -23,6 +23,7 @@ import org.hibernate.id.factory.IdentifierGeneratorFactory; import org.hibernate.metamodel.internal.ManagedTypeRepresentationResolverStandard; import org.hibernate.metamodel.spi.ManagedTypeRepresentationResolver; +import org.hibernate.type.WrapperArrayHandling; import org.hibernate.type.spi.TypeConfiguration; import jakarta.persistence.SharedCacheMode; @@ -70,6 +71,13 @@ public interface MetadataBuildingOptions { */ TimeZoneSupport getTimeZoneSupport(); + /** + * @return the {@link WrapperArrayHandling} to use for wrapper arrays {@code Byte[]} and {@code Character[]}. 
+ * + * @see org.hibernate.cfg.AvailableSettings#WRAPPER_ARRAY_HANDLING + */ + WrapperArrayHandling getWrapperArrayHandling(); + default ManagedTypeRepresentationResolver getManagedTypeRepresentationResolver() { // for now always return the standard one return ManagedTypeRepresentationResolverStandard.INSTANCE; @@ -235,5 +243,15 @@ default boolean isXmlMappingEnabled() { /** * Check to see if extensions can be hosted in CDI */ - boolean disallowExtensionsInCdi(); + boolean isAllowExtensionsInCdi(); + + /** + * Check to see if extensions can be hosted in CDI + * + * @deprecated Use {@link #isAllowExtensionsInCdi()} + */ + @Deprecated(forRemoval = true) + default boolean disallowExtensionsInCdi() { + return !isAllowExtensionsInCdi(); + } } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataImplementor.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataImplementor.java index 8bb6bdd9eeba..eb696d07c139 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataImplementor.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataImplementor.java @@ -56,4 +56,6 @@ public interface MetadataImplementor extends Metadata { void initSessionFactory(SessionFactoryImplementor sessionFactoryImplementor); void visitRegisteredComponents(Consumer consumer); + + Component getGenericComponent(Class componentClass); } diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataSourcesContributor.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataSourcesContributor.java index e4f0ec0fdf02..a4677dedf437 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataSourcesContributor.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/MetadataSourcesContributor.java @@ -7,6 +7,7 @@ package org.hibernate.boot.spi; import org.hibernate.boot.MetadataSources; +import org.hibernate.service.JavaServiceLoadable; /** * A bootstrap process hook for contributing sources to {@link 
MetadataSources}. @@ -15,6 +16,7 @@ * * @since 5.0 */ +@JavaServiceLoadable public interface MetadataSourcesContributor { /** * Perform the process of contributing to the {@link MetadataSources}. diff --git a/hibernate-core/src/main/java/org/hibernate/boot/spi/SessionFactoryBuilderFactory.java b/hibernate-core/src/main/java/org/hibernate/boot/spi/SessionFactoryBuilderFactory.java index a2b9ae5f4674..d78c2e05a8b3 100644 --- a/hibernate-core/src/main/java/org/hibernate/boot/spi/SessionFactoryBuilderFactory.java +++ b/hibernate-core/src/main/java/org/hibernate/boot/spi/SessionFactoryBuilderFactory.java @@ -7,6 +7,7 @@ package org.hibernate.boot.spi; import org.hibernate.boot.SessionFactoryBuilder; +import org.hibernate.service.JavaServiceLoadable; /** * An extension point for integrators that wish to hook into the process of how a SessionFactory @@ -15,6 +16,7 @@ * * @author Steve Ebersole */ +@JavaServiceLoadable public interface SessionFactoryBuilderFactory { /** * The contract method. Return the {@link SessionFactoryBuilder}. 
May return {@code null} diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/BytecodeLogging.java b/hibernate-core/src/main/java/org/hibernate/bytecode/BytecodeLogging.java index 550bf2fe5151..f78d9b712045 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/BytecodeLogging.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/BytecodeLogging.java @@ -19,9 +19,5 @@ ) public interface BytecodeLogging { String LOGGER_NAME = SubSystemLogging.BASE + "bytecode"; - Logger LOGGER = Logger.getLogger( LOGGER_NAME ); - - boolean TRACE_ENABLED = LOGGER.isTraceEnabled(); - boolean DEBUG_ENABLED = LOGGER.isDebugEnabled(); } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/BiDirectionalAssociationHandler.java b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/BiDirectionalAssociationHandler.java index 7579386afa7c..1cb4542d6746 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/BiDirectionalAssociationHandler.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/BiDirectionalAssociationHandler.java @@ -35,6 +35,7 @@ import net.bytebuddy.dynamic.scaffold.InstrumentedType; import net.bytebuddy.implementation.Implementation; import net.bytebuddy.implementation.bytecode.ByteCodeAppender; +import net.bytebuddy.implementation.bytecode.assign.Assigner; import net.bytebuddy.jar.asm.MethodVisitor; import net.bytebuddy.jar.asm.Opcodes; import net.bytebuddy.jar.asm.Type; @@ -57,7 +58,14 @@ static Implementation wrap( return implementation; } String mappedBy = getMappedBy( persistentField, targetEntity, enhancementContext ); - if ( mappedBy == null || mappedBy.isEmpty() ) { + String bidirectionalAttributeName; + if ( mappedBy == null ) { + bidirectionalAttributeName = getMappedByManyToMany( persistentField, targetEntity, enhancementContext ); + } + else { + bidirectionalAttributeName = mappedBy; + } + if ( 
bidirectionalAttributeName == null || bidirectionalAttributeName.isEmpty() ) { if ( log.isInfoEnabled() ) { log.infof( "Bi-directional association not managed for field [%s#%s]: Could not find target field in [%s]", @@ -70,15 +78,24 @@ static Implementation wrap( } TypeDescription targetType = FieldLocator.ForClassHierarchy.Factory.INSTANCE.make( targetEntity ) - .locate( mappedBy ) + .locate( bidirectionalAttributeName ) .getField() .getType() .asErasure(); if ( persistentField.hasAnnotation( OneToOne.class ) ) { implementation = Advice.withCustomMapping() - .bind( CodeTemplates.FieldValue.class, persistentField.getFieldDescription() ) - .bind( CodeTemplates.MappedBy.class, mappedBy ) + .bind( + // We need to make the fieldValue writable for one-to-one to avoid stack overflows + // when unsetting the inverse field + new Advice.OffsetMapping.ForField.Resolved.Factory<>( + CodeTemplates.FieldValue.class, + persistentField.getFieldDescription(), + false, + Assigner.Typing.DYNAMIC + ) + ) + .bind( CodeTemplates.InverseSide.class, mappedBy != null ) .to( CodeTemplates.OneToOneHandler.class ) .wrap( implementation ); } @@ -86,7 +103,7 @@ static Implementation wrap( if ( persistentField.hasAnnotation( OneToMany.class ) ) { implementation = Advice.withCustomMapping() .bind( CodeTemplates.FieldValue.class, persistentField.getFieldDescription() ) - .bind( CodeTemplates.MappedBy.class, mappedBy ) + .bind( CodeTemplates.InverseSide.class, mappedBy != null ) .to( persistentField.getType().asErasure().isAssignableTo( Map.class ) ? 
CodeTemplates.OneToManyOnMapHandler.class : CodeTemplates.OneToManyOnCollectionHandler.class ) @@ -96,7 +113,7 @@ static Implementation wrap( if ( persistentField.hasAnnotation( ManyToOne.class ) ) { implementation = Advice.withCustomMapping() .bind( CodeTemplates.FieldValue.class, persistentField.getFieldDescription() ) - .bind( CodeTemplates.MappedBy.class, mappedBy ) + .bind( CodeTemplates.BidirectionalAttribute.class, bidirectionalAttributeName ) .to( CodeTemplates.ManyToOneHandler.class ) .wrap( implementation ); } @@ -116,12 +133,13 @@ static Implementation wrap( implementation = Advice.withCustomMapping() .bind( CodeTemplates.FieldValue.class, persistentField.getFieldDescription() ) - .bind( CodeTemplates.MappedBy.class, mappedBy ) + .bind( CodeTemplates.InverseSide.class, mappedBy != null ) + .bind( CodeTemplates.BidirectionalAttribute.class, bidirectionalAttributeName ) .to( CodeTemplates.ManyToManyHandler.class ) .wrap( implementation ); } - return new BiDirectionalAssociationHandler( implementation, targetEntity, targetType, mappedBy ); + return new BiDirectionalAssociationHandler( implementation, managedCtClass, persistentField, targetEntity, targetType, bidirectionalAttributeName ); } public static TypeDescription getTargetEntityClass(TypeDescription managedCtClass, AnnotatedFieldDescription persistentField) { @@ -186,16 +204,16 @@ private static TypeDescription.Generic target(AnnotatedFieldDescription persiste } private static String getMappedBy(AnnotatedFieldDescription target, TypeDescription targetEntity, ByteBuddyEnhancementContext context) { - String mappedBy = getMappedByNotManyToMany( target ); + final String mappedBy = getMappedByFromAnnotation( target ); if ( mappedBy == null || mappedBy.isEmpty() ) { - return getMappedByManyToMany( target, targetEntity, context ); + return null; } else { // HHH-13446 - mappedBy from annotation may not be a valid bi-directional association, verify by calling isValidMappedBy() - return isValidMappedBy( target, 
targetEntity, mappedBy, context ) ? mappedBy : ""; + return isValidMappedBy( target, targetEntity, mappedBy, context ) ? mappedBy : null; } } - + private static boolean isValidMappedBy(AnnotatedFieldDescription persistentField, TypeDescription targetEntity, String mappedBy, ByteBuddyEnhancementContext context) { try { FieldDescription f = FieldLocator.ForClassHierarchy.Factory.INSTANCE.make( targetEntity ).locate( mappedBy ).getField(); @@ -207,8 +225,7 @@ private static boolean isValidMappedBy(AnnotatedFieldDescription persistentField return false; } } - - private static String getMappedByNotManyToMany(AnnotatedFieldDescription target) { + private static String getMappedByFromAnnotation(AnnotatedFieldDescription target) { try { AnnotationDescription.Loadable oto = target.getAnnotation( OneToOne.class ); if ( oto != null ) { @@ -235,7 +252,7 @@ private static String getMappedByManyToMany(AnnotatedFieldDescription target, Ty for ( FieldDescription f : targetEntity.getDeclaredFields() ) { AnnotatedFieldDescription annotatedF = new AnnotatedFieldDescription( context, f ); if ( context.isPersistentField( annotatedF ) - && target.getName().equals( getMappedByNotManyToMany( annotatedF ) ) + && target.getName().equals( getMappedBy( annotatedF, entityType( annotatedF.getType() ), context ) ) && target.getDeclaringType().asErasure().isAssignableTo( entityType( annotatedF.getType() ) ) ) { if ( log.isDebugEnabled() ) { log.debugf( @@ -267,21 +284,28 @@ private static TypeDescription entityType(TypeDescription.Generic type) { private final Implementation delegate; + private final TypeDescription entity; + private final AnnotatedFieldDescription field; + private final TypeDescription targetEntity; private final TypeDescription targetType; - private final String mappedBy; + private final String bidirectionalAttributeName; private BiDirectionalAssociationHandler( Implementation delegate, + TypeDescription entity, + AnnotatedFieldDescription field, TypeDescription targetEntity, 
TypeDescription targetType, - String mappedBy) { + String bidirectionalAttributeName) { this.delegate = delegate; + this.entity = entity; + this.field = field; this.targetEntity = targetEntity; this.targetType = targetType; - this.mappedBy = mappedBy; + this.bidirectionalAttributeName = bidirectionalAttributeName; } @Override @@ -315,11 +339,21 @@ public void visitMethodInsn(int opcode, String owner, String name, String desc, super.visitMethodInsn( Opcodes.INVOKEVIRTUAL, targetEntity.getInternalName(), - EnhancerConstants.PERSISTENT_FIELD_READER_PREFIX + mappedBy, + EnhancerConstants.PERSISTENT_FIELD_READER_PREFIX + bidirectionalAttributeName, Type.getMethodDescriptor( Type.getType( targetType.getDescriptor() ) ), false ); } + else if ( name.equals( "getterSelf" ) ) { + super.visitVarInsn( Opcodes.ALOAD, 0 ); + super.visitMethodInsn( + Opcodes.INVOKEVIRTUAL, + entity.getInternalName(), + EnhancerConstants.PERSISTENT_FIELD_READER_PREFIX + field.getName(), + Type.getMethodDescriptor( Type.getType( field.getDescriptor() ) ), + false + ); + } else if ( name.equals( "setterSelf" ) ) { super.visitInsn( Opcodes.POP ); super.visitTypeInsn( Opcodes.CHECKCAST, targetEntity.getInternalName() ); @@ -327,7 +361,7 @@ else if ( name.equals( "setterSelf" ) ) { super.visitMethodInsn( Opcodes.INVOKEVIRTUAL, targetEntity.getInternalName(), - EnhancerConstants.PERSISTENT_FIELD_WRITER_PREFIX + mappedBy, + EnhancerConstants.PERSISTENT_FIELD_WRITER_PREFIX + bidirectionalAttributeName, Type.getMethodDescriptor( Type.getType( void.class ), Type.getType( targetType.getDescriptor() ) ), false ); @@ -339,7 +373,7 @@ else if ( name.equals( "setterNull" ) ) { super.visitMethodInsn( Opcodes.INVOKEVIRTUAL, targetEntity.getInternalName(), - EnhancerConstants.PERSISTENT_FIELD_WRITER_PREFIX + mappedBy, + EnhancerConstants.PERSISTENT_FIELD_WRITER_PREFIX + bidirectionalAttributeName, Type.getMethodDescriptor( Type.getType( void.class ), Type.getType( targetType.getDescriptor() ) ), false ); @@ -368,11 
+402,11 @@ public boolean equals(final Object o) { return Objects.equals( delegate, that.delegate ) && Objects.equals( targetEntity, that.targetEntity ) && Objects.equals( targetType, that.targetType ) && - Objects.equals( mappedBy, that.mappedBy ); + Objects.equals( bidirectionalAttributeName, that.bidirectionalAttributeName ); } @Override public int hashCode() { - return Objects.hash( delegate, targetEntity, targetType, mappedBy ); + return Objects.hash( delegate, targetEntity, targetType, bidirectionalAttributeName ); } } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/ByteBuddyEnhancementContext.java b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/ByteBuddyEnhancementContext.java index 91d5454ac78c..681ffe310611 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/ByteBuddyEnhancementContext.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/ByteBuddyEnhancementContext.java @@ -15,6 +15,7 @@ import org.hibernate.bytecode.enhance.internal.bytebuddy.EnhancerImpl.AnnotatedFieldDescription; import org.hibernate.bytecode.enhance.spi.EnhancementContext; +import jakarta.persistence.Embedded; import net.bytebuddy.description.field.FieldDescription; import net.bytebuddy.description.method.MethodDescription; import net.bytebuddy.description.type.TypeDescription; @@ -67,6 +68,10 @@ public boolean isPersistentField(AnnotatedFieldDescription field) { return enhancementContext.isPersistentField( field ); } + public boolean isCompositeField(AnnotatedFieldDescription field) { + return isCompositeClass( field.getType().asErasure() ); + } + public AnnotatedFieldDescription[] order(AnnotatedFieldDescription[] persistentFields) { return (AnnotatedFieldDescription[]) enhancementContext.order( persistentFields ); } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/CodeTemplates.java 
b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/CodeTemplates.java index cddc79f1cca2..166c3ca27781 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/CodeTemplates.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/CodeTemplates.java @@ -356,15 +356,24 @@ static class CompositeOwnerDirtyCheckingHandler { static class OneToOneHandler { @Advice.OnMethodEnter - static void enter(@FieldValue Object field, @Advice.Argument(0) Object argument, @MappedBy String mappedBy) { - if ( field != null && Hibernate.isPropertyInitialized( field, mappedBy ) && argument != null ) { - setterNull( field, null ); + static void enter(@FieldValue Object field, @Advice.Argument(0) Object argument, @InverseSide boolean inverseSide) { + // Unset the inverse attribute, which possibly initializes the old value, + // only if this is the inverse side, or the old value is already initialized + if ( ( inverseSide || Hibernate.isInitialized( field ) ) && getterSelf() != null ) { + // We copy the old value, then set the field to null which we must do before + // unsetting the inverse attribute, as we'd otherwise run into a stack overflow situation + // The field is writable, so setting it to null here is actually a field write. 
+ Object fieldCopy = field; + field = null; + setterNull( fieldCopy, null ); } } @Advice.OnMethodExit - static void exit(@Advice.This Object self, @Advice.Argument(0) Object argument, @MappedBy String mappedBy) { - if ( argument != null && Hibernate.isPropertyInitialized( argument, mappedBy ) && getter( argument ) != self ) { + static void exit(@Advice.This Object self, @Advice.Argument(0) Object argument, @InverseSide boolean inverseSide) { + // Update the inverse attribute, which possibly initializes the argument value, + // only if this is the inverse side, or the argument value is already initialized + if ( argument != null && ( inverseSide || Hibernate.isInitialized( argument ) ) && getter( argument ) != self ) { setterSelf( argument, self ); } } @@ -374,6 +383,11 @@ static Object getter(Object target) { throw new AssertionError(); } + static Object getterSelf() { + // is replaced by the actual method call + throw new AssertionError(); + } + static void setterNull(Object target, Object argument) { // is replaced by the actual method call throw new AssertionError(); @@ -387,24 +401,30 @@ static void setterSelf(Object target, Object argument) { static class OneToManyOnCollectionHandler { @Advice.OnMethodEnter - static void enter(@FieldValue Collection field, @Advice.Argument(0) Collection argument, @MappedBy String mappedBy) { - if ( field != null && Hibernate.isPropertyInitialized( field, mappedBy ) ) { + static void enter(@FieldValue Collection field, @Advice.Argument(0) Collection argument, @InverseSide boolean inverseSide) { + // If this is the inverse side or the old collection is already initialized, + // we must unset the respective ManyToOne of the old collection elements, + // because only the owning side is responsible for persisting the state. 
+ if ( ( inverseSide || Hibernate.isInitialized( field ) ) && getterSelf() != null ) { Object[] array = field.toArray(); - for ( Object array1 : array ) { - if ( argument == null || !argument.contains( array1 ) ) { - setterNull( array1, null ); + for ( int i = 0; i < array.length; i++ ) { + if ( ( inverseSide || Hibernate.isInitialized( array[i] ) ) && ( argument == null || !argument.contains( array[i] ) ) ) { + setterNull( array[i], null ); } } } } @Advice.OnMethodExit - static void exit(@Advice.This Object self, @Advice.Argument(0) Collection argument, @MappedBy String mappedBy) { - if ( argument != null && Hibernate.isPropertyInitialized( argument, mappedBy ) ) { + static void exit(@Advice.This Object self, @Advice.Argument(0) Collection argument, @InverseSide boolean inverseSide) { + // If this is the inverse side or the new collection is already initialized, + // we must set the respective ManyToOne on the new collection elements, + // because only the owning side is responsible for persisting the state. 
+ if ( argument != null && ( inverseSide || Hibernate.isInitialized( argument ) ) ) { Object[] array = argument.toArray(); - for ( Object array1 : array ) { - if ( Hibernate.isPropertyInitialized( array1, mappedBy ) && getter( array1 ) != self ) { - setterSelf( array1, self ); + for ( int i = 0; i < array.length; i++ ) { + if ( ( inverseSide || Hibernate.isInitialized( array[i] ) ) && getter( array[i] ) != self ) { + setterSelf( array[i], self ); } } } @@ -415,6 +435,11 @@ static Object getter(Object target) { throw new AssertionError(); } + static Object getterSelf() { + // is replaced by the actual method call + throw new AssertionError(); + } + static void setterNull(Object target, Object argument) { // is replaced by the actual method call throw new AssertionError(); @@ -428,24 +453,31 @@ static void setterSelf(Object target, Object argument) { static class OneToManyOnMapHandler { @Advice.OnMethodEnter - static void enter(@FieldValue Map field, @Advice.Argument(0) Map argument, @MappedBy String mappedBy) { - if ( field != null && Hibernate.isPropertyInitialized( field, mappedBy ) ) { + static void enter(@FieldValue Map field, @Advice.Argument(0) Map argument, @InverseSide boolean inverseSide) { + // If this is the inverse side or the old collection is already initialized, + // we must unset the respective ManyToOne of the old collection elements, + // because only the owning side is responsible for persisting the state. 
+ if ( ( inverseSide || Hibernate.isInitialized( field ) ) && getterSelf() != null ) { Object[] array = field.values().toArray(); - for ( Object array1 : array ) { - if ( argument == null || !argument.values().contains( array1 ) ) { - setterNull( array1, null ); + for ( int i = 0; i < array.length; i++ ) { + if ( ( inverseSide || Hibernate.isInitialized( array[i] ) ) + && ( argument == null || !argument.containsValue( array[i] ) ) ) { + setterNull( array[i], null ); } } } } @Advice.OnMethodExit - static void exit(@Advice.This Object self, @Advice.Argument(0) Map argument, @MappedBy String mappedBy) { - if ( argument != null && Hibernate.isPropertyInitialized( argument, mappedBy ) ) { + static void exit(@Advice.This Object self, @Advice.Argument(0) Map argument, @InverseSide boolean inverseSide) { + // If this is the inverse side or the new collection is already initialized, + // we must set the respective ManyToOne on the new collection elements, + // because only the owning side is responsible for persisting the state. + if ( argument != null && ( inverseSide || Hibernate.isInitialized( argument ) ) ) { Object[] array = argument.values().toArray(); - for ( Object array1 : array ) { - if ( Hibernate.isPropertyInitialized( array1, mappedBy ) && getter( array1 ) != self ) { - setterSelf( array1, self ); + for ( int i = 0; i < array.length; i++ ) { + if ( ( inverseSide || Hibernate.isInitialized( array[i] ) ) && getter( array[i] ) != self ) { + setterSelf( array[i], self ); } } } @@ -456,6 +488,11 @@ static Object getter(Object target) { throw new AssertionError(); } + static Object getterSelf() { + // is replaced by the actual method call + throw new AssertionError(); + } + static void setterNull(Object target, Object argument) { // is replaced with the actual setter call during instrumentation. 
throw new AssertionError(); @@ -469,8 +506,9 @@ static void setterSelf(Object target, Object argument) { static class ManyToOneHandler { @Advice.OnMethodEnter - static void enter(@Advice.This Object self, @FieldValue Object field, @MappedBy String mappedBy) { - if ( field != null && Hibernate.isPropertyInitialized( field, mappedBy ) ) { + static void enter(@Advice.This Object self, @FieldValue Object field, @BidirectionalAttribute String inverseAttribute) { + // This is always the owning side, so we only need to update the inverse side if the collection is initialized + if ( getterSelf() != null && Hibernate.isPropertyInitialized( field, inverseAttribute ) ) { Collection c = getter( field ); if ( c != null ) { c.remove( self ); @@ -479,8 +517,9 @@ static void enter(@Advice.This Object self, @FieldValue Object field, @MappedBy } @Advice.OnMethodExit - static void exit(@Advice.This Object self, @Advice.Argument(0) Object argument, @MappedBy String mappedBy) { - if ( argument != null && Hibernate.isPropertyInitialized( argument, mappedBy ) ) { + static void exit(@Advice.This Object self, @Advice.Argument(0) Object argument, @BidirectionalAttribute String inverseAttribute) { + // This is always the owning side, so we only need to update the inverse side if the collection is initialized + if ( argument != null && Hibernate.isPropertyInitialized( argument, inverseAttribute ) ) { Collection c = getter( argument ); if ( c != null && !c.contains( self ) ) { c.add( self ); @@ -492,29 +531,41 @@ static Collection getter(Object target) { // is replaced by the actual method call throw new AssertionError(); } + + static Object getterSelf() { + // is replaced by the actual method call + throw new AssertionError(); + } } static class ManyToManyHandler { @Advice.OnMethodEnter - static void enter(@Advice.This Object self, @FieldValue Collection field, @Advice.Argument(0) Collection argument, @MappedBy String mappedBy) { - if ( field != null && Hibernate.isPropertyInitialized( field, 
mappedBy ) ) { + static void enter(@Advice.This Object self, @FieldValue Collection field, @Advice.Argument(0) Collection argument, @InverseSide boolean inverseSide, @BidirectionalAttribute String bidirectionalAttribute) { + // If this is the inverse side or the old collection is already initialized, + // we must remove self from the respective old collection elements inverse collections, + // because only the owning side is responsible for persisting the state. + if ( ( inverseSide || Hibernate.isInitialized( field ) ) && getterSelf() != null ) { Object[] array = field.toArray(); - for ( Object array1 : array ) { - if ( argument == null || !argument.contains( array1 ) ) { - getter( array1 ).remove( self ); + for ( int i = 0; i < array.length; i++ ) { + if ( ( inverseSide || Hibernate.isPropertyInitialized( array[i], bidirectionalAttribute ) ) + && ( argument == null || !argument.contains( array[i] ) ) ) { + getter( array[i] ).remove( self ); } } } } @Advice.OnMethodExit - static void exit(@Advice.This Object self, @Advice.Argument(0) Collection argument, @MappedBy String mappedBy) { - if ( argument != null && Hibernate.isPropertyInitialized( argument, mappedBy ) ) { + static void exit(@Advice.This Object self, @Advice.Argument(0) Collection argument, @InverseSide boolean inverseSide, @BidirectionalAttribute String bidirectionalAttribute) { + // If this is the inverse side or the new collection is already initialized, + // we must add self to the respective new collection elements inverse collections, + // because only the owning side is responsible for persisting the state. 
+ if ( argument != null && ( inverseSide || Hibernate.isInitialized( argument ) ) ) { Object[] array = argument.toArray(); for ( Object array1 : array ) { - if ( Hibernate.isPropertyInitialized( array1, mappedBy ) ) { + if ( inverseSide || Hibernate.isPropertyInitialized( array1, bidirectionalAttribute ) ) { Collection c = getter( array1 ); - if ( c != self && c != null ) { + if ( c != null && !c.contains( self ) ) { c.add( self ); } } @@ -526,6 +577,11 @@ static Collection getter(Object self) { // is replaced by the actual method call throw new AssertionError(); } + + static Object getterSelf() { + // is replaced by the actual method call + throw new AssertionError(); + } } @Retention(RetentionPolicy.RUNTIME) @@ -539,22 +595,33 @@ static Collection getter(Object self) { } @Retention(RetentionPolicy.RUNTIME) - @interface MappedBy { + @interface InverseSide { + + } + + @Retention(RetentionPolicy.RUNTIME) + @interface BidirectionalAttribute { } // mapping to get private field from superclass by calling the enhanced reader, for use when field is not visible static class GetterMapping implements Advice.OffsetMapping { + private final TypeDescription.Generic returnType; private final FieldDescription persistentField; GetterMapping(FieldDescription persistentField) { + this( persistentField, persistentField.getType() ); + } + + GetterMapping(FieldDescription persistentField, TypeDescription.Generic returnType) { this.persistentField = persistentField; + this.returnType = returnType; } @Override public Target resolve(TypeDescription instrumentedType, MethodDescription instrumentedMethod, Assigner assigner, Advice.ArgumentHandler argumentHandler, Sort sort) { - MethodDescription.Token signature = new MethodDescription.Token( EnhancerConstants.PERSISTENT_FIELD_READER_PREFIX + persistentField.getName(), Opcodes.ACC_PUBLIC, persistentField.getType() ); - MethodDescription method = new MethodDescription.Latent( instrumentedType.getSuperClass().asErasure(), signature ); + 
MethodDescription.Token signature = new MethodDescription.Token( EnhancerConstants.PERSISTENT_FIELD_READER_PREFIX + persistentField.getName() , Opcodes.ACC_PUBLIC, returnType ); + MethodDescription method = new MethodDescription.Latent( persistentField.getDeclaringType().asErasure(), signature ); return new Target.AbstractReadOnlyAdapter() { @Override diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/InlineDirtyCheckingHandler.java b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/InlineDirtyCheckingHandler.java index 34847c8c4a29..f263f9a31b88 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/InlineDirtyCheckingHandler.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/InlineDirtyCheckingHandler.java @@ -77,10 +77,10 @@ else if ( !persistentField.hasAnnotation( Id.class ) ); } - if ( enhancementContext.isCompositeClass( persistentField.getType().asErasure() ) - && persistentField.hasAnnotation( Embedded.class ) - // Don't do composite owner tracking for records - && !persistentField.getType().isRecord() ) { + if ( enhancementContext.isCompositeField( persistentField ) + && !persistentField.hasAnnotation( EmbeddedId.class ) + // Don't do composite owner tracking for records + && !persistentField.getType().isRecord() ) { // HHH-13759 - Call getter on superclass if field is not visible // An embedded field won't be visible if declared private in a superclass @@ -88,7 +88,7 @@ else if ( !persistentField.hasAnnotation( Id.class ) Advice.WithCustomMapping advice = Advice.withCustomMapping(); advice = persistentField.isVisibleTo( managedCtClass ) ? 
advice.bind( CodeTemplates.FieldValue.class, persistentField.getFieldDescription() ) - : advice.bind( CodeTemplates.FieldValue.class, new CodeTemplates.GetterMapping( persistentField.getFieldDescription() ) ); + : advice.bind( CodeTemplates.FieldValue.class, new CodeTemplates.GetterMapping( persistentField.getFieldDescription(), persistentField.getGetter().get().getReturnType() ) ); implementation = advice .bind( CodeTemplates.FieldName.class, persistentField.getName() ) diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/PersistentAttributeTransformer.java b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/PersistentAttributeTransformer.java index 2b7cccac1d12..256302cab038 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/PersistentAttributeTransformer.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/PersistentAttributeTransformer.java @@ -270,14 +270,13 @@ DynamicType.Builder applyTo(DynamicType.Builder builder) { TypeDescription.VOID, Visibility.PUBLIC ) - .withParameters( enhancedField.getType().asErasure() ) + .withParameters( enhancedField.asDefined().getType().asErasure() ) .intercept( fieldWriter( enhancedField ) ); } if ( !compositeOwner && !enhancementContext.isMappedSuperclassClass( managedCtClass ) - && enhancedField.hasAnnotation( Embedded.class ) - && enhancementContext.isCompositeClass( enhancedField.getType().asErasure() ) + && enhancementContext.isCompositeField( enhancedField ) && enhancementContext.doDirtyCheckingInline( managedCtClass ) ) { compositeOwner = true; } @@ -396,12 +395,12 @@ public Size apply( MethodDescription instrumentedMethod ) { methodVisitor.visitVarInsn( Opcodes.ALOAD, 0 ); - methodVisitor.visitVarInsn( Type.getType( persistentField.getType().asErasure().getDescriptor() ).getOpcode( Opcodes.ILOAD ), 1 ); + methodVisitor.visitVarInsn( Type.getType( 
persistentField.asDefined().getType().asErasure().getDescriptor() ).getOpcode( Opcodes.ILOAD ), 1 ); methodVisitor.visitMethodInsn( Opcodes.INVOKESPECIAL, managedCtClass.getSuperClass().asErasure().getInternalName(), EnhancerConstants.PERSISTENT_FIELD_WRITER_PREFIX + persistentField.getName(), - Type.getMethodDescriptor( Type.getType( void.class ), Type.getType( persistentField.getType().asErasure().getDescriptor() ) ), + Type.getMethodDescriptor( Type.getType( void.class ), Type.getType( persistentField.asDefined().getType().asErasure().getDescriptor() ) ), false ); methodVisitor.visitInsn( Opcodes.RETURN ); diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/BytecodeInterceptorLogging.java b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/BytecodeInterceptorLogging.java index 1c14de6f75eb..9ef8c87315b9 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/BytecodeInterceptorLogging.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/BytecodeInterceptorLogging.java @@ -34,9 +34,6 @@ public interface BytecodeInterceptorLogging extends BasicLogger { Logger LOGGER = Logger.getLogger( LOGGER_NAME ); BytecodeInterceptorLogging MESSAGE_LOGGER = Logger.getMessageLogger(BytecodeInterceptorLogging.class, LOGGER_NAME ); - boolean TRACE_ENABLED = LOGGER.isTraceEnabled(); - boolean DEBUG_ENABLED = LOGGER.isDebugEnabled(); - @LogMessage(level = WARN) @Message( id = 90005901, diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/EnhancementAsProxyLazinessInterceptor.java b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/EnhancementAsProxyLazinessInterceptor.java index 09252512236c..0c601b2d6c13 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/EnhancementAsProxyLazinessInterceptor.java +++ 
b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/EnhancementAsProxyLazinessInterceptor.java @@ -15,10 +15,11 @@ import org.hibernate.bytecode.BytecodeLogging; import org.hibernate.bytecode.enhance.spi.LazyPropertyInitializer; import org.hibernate.engine.spi.EntityKey; -import org.hibernate.engine.spi.SelfDirtinessTracker; import org.hibernate.engine.spi.SharedSessionContractImplementor; import org.hibernate.internal.util.collections.ArrayHelper; +import org.hibernate.metamodel.mapping.AttributeMapping; import org.hibernate.persister.entity.EntityPersister; +import org.hibernate.type.CollectionType; import org.hibernate.type.CompositeType; import org.hibernate.type.Type; @@ -67,7 +68,7 @@ public EnhancementAsProxyLazinessInterceptor( collectionAttributeNames = new HashSet<>(); for ( int i = 0; i < propertyTypes.length; i++ ) { Type propertyType = propertyTypes[i]; - if ( propertyType.isCollectionType() ) { + if ( propertyType instanceof CollectionType ) { collectionAttributeNames.add( entityPersister.getPropertyNames()[i] ); } } @@ -102,36 +103,40 @@ protected Object handleRead(Object target, String attributeName, Object value) { return EnhancementHelper.performWork( this, (session, isTempSession) -> { - final Object[] writtenValues; + final Object[] writtenAttributeValues; + final AttributeMapping[] writtenAttributeMappings; - final EntityPersister entityPersister = session.getFactory() - .getRuntimeMetamodels() - .getMappingMetamodel() - .getEntityDescriptor( getEntityName() ); + final EntityPersister entityPersister = + session.getFactory().getMappingMetamodel() + .getEntityDescriptor( getEntityName() ); if ( writtenFieldNames != null && !writtenFieldNames.isEmpty() ) { // enhancement has dirty-tracking available and at least one attribute was explicitly set if ( writtenFieldNames.contains( attributeName ) ) { - // the requested attribute was one of the attributes explicitly set, we can just return the explicitly set value + // the 
requested attribute was one of the attributes explicitly set, + // we can just return the explicitly-set value return entityPersister.getPropertyValue( target, attributeName ); } - // otherwise we want to save all of the explicitly set values in anticipation of + // otherwise we want to save all the explicitly-set values in anticipation of // the force initialization below so that we can "replay" them after the // initialization - writtenValues = new Object[writtenFieldNames.size()]; + writtenAttributeValues = new Object[writtenFieldNames.size()]; + writtenAttributeMappings = new AttributeMapping[writtenFieldNames.size()]; int index = 0; for ( String writtenFieldName : writtenFieldNames ) { - writtenValues[index] = entityPersister.getPropertyValue( target, writtenFieldName ); + writtenAttributeMappings[index] = entityPersister.findAttributeMapping( writtenFieldName ); + writtenAttributeValues[index] = writtenAttributeMappings[index].getValue( target ); index++; } } else { - writtenValues = null; + writtenAttributeValues = null; + writtenAttributeMappings = null; } final Object initializedValue = forceInitialize( @@ -143,18 +148,13 @@ protected Object handleRead(Object target, String attributeName, Object value) { setInitialized(); - if ( writtenValues != null ) { + if ( writtenAttributeValues != null ) { // here is the replaying of the explicitly set values we prepared above - for ( String writtenFieldName : writtenFieldNames ) { - final int size = entityPersister.getNumberOfAttributeMappings(); - for ( int index = 0; index < size; index++ ) { - if ( writtenFieldName.contains( entityPersister.getAttributeMapping( index ).getAttributeName() ) ) { - entityPersister.setValue( - target, - index, - writtenValues[index] - ); - } + for ( int i = 0; i < writtenAttributeMappings.length; i++ ) { + final AttributeMapping attribute = writtenAttributeMappings[i]; + attribute.setValue( target, writtenAttributeValues[i] ); + if ( inLineDirtyChecking ) { + asSelfDirtinessTracker( 
target ).$$_hibernate_trackChange( attribute.getAttributeName() ); } } writtenFieldNames.clear(); @@ -246,7 +246,7 @@ protected Object handleWrite(Object target, String attributeName, Object oldValu if ( identifierAttributeNames.contains( attributeName ) ) { // it is illegal for the identifier value to be changed. Normally Hibernate - // validates this during flush. However, here it is dangerous to just allow the + // validates this during flush. However, here it's dangerous to just allow the // new value to be set and continue on waiting for the flush for validation // because this interceptor manages the entity's entry in the PC itself. So // just do the check here up-front diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/LazyAttributeDescriptor.java b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/LazyAttributeDescriptor.java index 0a2b54aff487..8f0ba8647737 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/LazyAttributeDescriptor.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/interceptor/LazyAttributeDescriptor.java @@ -7,6 +7,7 @@ package org.hibernate.bytecode.enhance.spi.interceptor; import org.hibernate.mapping.Property; +import org.hibernate.type.CollectionType; import org.hibernate.type.Type; /** @@ -21,7 +22,7 @@ public static LazyAttributeDescriptor from( int lazyIndex) { String fetchGroupName = property.getLazyGroup(); if ( fetchGroupName == null ) { - fetchGroupName = property.getType().isCollectionType() + fetchGroupName = property.getType() instanceof CollectionType ? 
property.getName() : "DEFAULT"; } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataNonPojoImpl.java b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataNonPojoImpl.java index 4abf302fb3ce..172e02e56e2e 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataNonPojoImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataNonPojoImpl.java @@ -16,6 +16,8 @@ import org.hibernate.engine.spi.PersistentAttributeInterceptor; import org.hibernate.engine.spi.SharedSessionContractImplementor; +import org.checkerframework.checker.nullness.qual.Nullable; + /** * BytecodeEnhancementMetadata implementation for non-POJO models, mainly * {@link org.hibernate.metamodel.RepresentationMode#MAP} @@ -78,12 +80,12 @@ public PersistentAttributeInterceptable createEnhancedProxy(EntityKey keyToLoad, } @Override - public LazyAttributeLoadingInterceptor extractInterceptor(Object entity) throws NotInstrumentedException { + public @Nullable LazyAttributeLoadingInterceptor extractInterceptor(Object entity) throws NotInstrumentedException { throw new NotInstrumentedException( errorMsg ); } @Override - public BytecodeLazyAttributeInterceptor extractLazyInterceptor(Object entity) throws NotInstrumentedException { + public @Nullable BytecodeLazyAttributeInterceptor extractLazyInterceptor(Object entity) throws NotInstrumentedException { throw new NotInstrumentedException( errorMsg ); } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataPojoImpl.java b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataPojoImpl.java index 15c0ef57e454..148ad7093bf9 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataPojoImpl.java +++ 
b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeEnhancementMetadataPojoImpl.java @@ -27,6 +27,8 @@ import org.hibernate.persister.entity.EntityPersister; import org.hibernate.type.CompositeType; +import org.checkerframework.checker.nullness.qual.Nullable; + import static org.hibernate.engine.internal.ManagedTypeHelper.asPersistentAttributeInterceptable; import static org.hibernate.engine.internal.ManagedTypeHelper.isPersistentAttributeInterceptableType; import static org.hibernate.engine.internal.ManagedTypeHelper.processIfSelfDirtinessTracker; @@ -136,7 +138,7 @@ public boolean isAttributeLoaded(Object entity, String attributeName) { } @Override - public LazyAttributeLoadingInterceptor extractInterceptor(Object entity) throws NotInstrumentedException { + public @Nullable LazyAttributeLoadingInterceptor extractInterceptor(Object entity) throws NotInstrumentedException { return (LazyAttributeLoadingInterceptor) extractLazyInterceptor( entity ); } @@ -257,7 +259,7 @@ public void injectInterceptor( } @Override - public BytecodeLazyAttributeInterceptor extractLazyInterceptor(Object entity) throws NotInstrumentedException { + public @Nullable BytecodeLazyAttributeInterceptor extractLazyInterceptor(Object entity) throws NotInstrumentedException { if ( !enhancedForLazyLoading ) { throw new NotInstrumentedException( "Entity class [" + entityClass.getName() + "] is not enhanced for lazy loading" ); } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeProviderInitiator.java b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeProviderInitiator.java index 9b055d6a9279..ee7ea7cdf2b7 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeProviderInitiator.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/BytecodeProviderInitiator.java @@ -6,23 +6,40 @@ */ package org.hibernate.bytecode.internal; +import java.util.Collection; +import java.util.Iterator; import 
java.util.Map; +import java.util.ServiceLoader; import org.hibernate.Internal; import org.hibernate.boot.registry.StandardServiceInitiator; +import org.hibernate.boot.registry.classloading.spi.ClassLoaderService; import org.hibernate.bytecode.spi.BytecodeProvider; import org.hibernate.internal.CoreMessageLogger; -import org.hibernate.internal.util.config.ConfigurationHelper; import org.hibernate.service.spi.ServiceRegistryImplementor; import org.jboss.logging.Logger; -import static org.hibernate.cfg.AvailableSettings.BYTECODE_PROVIDER; +import static org.hibernate.internal.util.NullnessUtil.castNonNull; public final class BytecodeProviderInitiator implements StandardServiceInitiator { + /** + * @deprecated Register a {@link BytecodeProvider} through Java {@linkplain java.util.ServiceLoader services}. + */ + @Deprecated( forRemoval = true ) public static final String BYTECODE_PROVIDER_NAME_BYTEBUDDY = "bytebuddy"; + + /** + * @deprecated Register a {@link BytecodeProvider} through Java {@linkplain java.util.ServiceLoader services}. 
+ */ + @Deprecated( forRemoval = true ) public static final String BYTECODE_PROVIDER_NAME_NONE = "none"; + + /** + * @deprecated Deprecated with no replacement + */ + @Deprecated( forRemoval = true ) public static final String BYTECODE_PROVIDER_NAME_DEFAULT = BYTECODE_PROVIDER_NAME_BYTEBUDDY; /** @@ -32,8 +49,9 @@ public final class BytecodeProviderInitiator implements StandardServiceInitiator @Override public BytecodeProvider initiateService(Map configurationValues, ServiceRegistryImplementor registry) { - String provider = ConfigurationHelper.getString( BYTECODE_PROVIDER, configurationValues, BYTECODE_PROVIDER_NAME_DEFAULT ); - return buildBytecodeProvider( provider ); + final ClassLoaderService classLoaderService = castNonNull( registry.getService( ClassLoaderService.class ) ); + final Collection bytecodeProviders = classLoaderService.loadJavaServices( BytecodeProvider.class ); + return getBytecodeProvider( bytecodeProviders ); } @Override @@ -43,12 +61,30 @@ public Class getServiceInitiated() { @Internal public static BytecodeProvider buildDefaultBytecodeProvider() { - return buildBytecodeProvider( BYTECODE_PROVIDER_NAME_BYTEBUDDY ); + // Use BytecodeProvider's ClassLoader to ensure we can find the service + return getBytecodeProvider( ServiceLoader.load( + BytecodeProvider.class, + BytecodeProvider.class.getClassLoader() + ) ); } @Internal - public static BytecodeProvider buildBytecodeProvider(String providerName) { + public static BytecodeProvider getBytecodeProvider(Iterable bytecodeProviders) { + final Iterator iterator = bytecodeProviders.iterator(); + if ( !iterator.hasNext() ) { + // If no BytecodeProvider service is available, default to the "no-op" enhancer + return new org.hibernate.bytecode.internal.none.BytecodeProviderImpl(); + } + + final BytecodeProvider provider = iterator.next(); + if ( iterator.hasNext() ) { + throw new IllegalStateException( "Found multiple BytecodeProvider service registrations, cannot determine which one to use" ); + } + 
return provider; + } + @Internal + public static BytecodeProvider buildBytecodeProvider(String providerName) { CoreMessageLogger LOG = Logger.getMessageLogger( CoreMessageLogger.class, BytecodeProviderInitiator.class.getName() ); LOG.bytecodeProvider( providerName ); diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/BytecodeProviderImpl.java b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/BytecodeProviderImpl.java index a560dcb0765a..6b763340bda2 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/BytecodeProviderImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/BytecodeProviderImpl.java @@ -18,7 +18,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.Callable; import org.hibernate.HibernateException; import org.hibernate.bytecode.enhance.internal.bytebuddy.EnhancerImpl; @@ -67,10 +66,11 @@ import net.bytebuddy.jar.asm.Type; import net.bytebuddy.matcher.ElementMatcher; import net.bytebuddy.matcher.ElementMatchers; +import org.checkerframework.checker.nullness.qual.Nullable; public class BytecodeProviderImpl implements BytecodeProvider { - private static final CoreMessageLogger LOG = CoreLogging.messageLogger( BytecodeProviderImpl.class ); + private static final String INSTANTIATOR_PROXY_NAMING_SUFFIX = "HibernateInstantiator"; private static final String OPTIMIZER_PROXY_NAMING_SUFFIX = "HibernateAccessOptimizer"; private static final ElementMatcher.Junction newInstanceMethodName = ElementMatchers.named( @@ -167,7 +167,7 @@ public ReflectionOptimizer getReflectionOptimizer( findAccessors( clazz, getterNames, setterNames, types, getters, setters ); } catch (InvalidPropertyAccessorException ex) { - LOG.unableToGenerateReflectionOptimizer( clazz.getName(), ex ); + LOG.unableToGenerateReflectionOptimizer( clazz.getName(), ex.getMessage() ); return null; } @@ -183,7 +183,7 @@ public 
ReflectionOptimizer getReflectionOptimizer( .method( setPropertyValuesMethodName ) .intercept( new Implementation.Simple( new SetPropertyValues( clazz, getterNames, setters ) ) ) .method( getPropertyNamesMethodName ) - .intercept( MethodCall.call( new CloningPropertyCall( getterNames ) ) ) + .intercept( new Implementation.Simple( new GetPropertyNames( getterNames ) ) ) ); try { @@ -198,7 +198,7 @@ public ReflectionOptimizer getReflectionOptimizer( } @Override - public ReflectionOptimizer getReflectionOptimizer(Class clazz, Map propertyAccessMap) { + public @Nullable ReflectionOptimizer getReflectionOptimizer(Class clazz, Map propertyAccessMap) { final Class fastClass; if ( !clazz.isInterface() && !Modifier.isAbstract( clazz.getModifiers() ) ) { // we only provide a fast class instantiator if the class can be instantiated @@ -231,7 +231,7 @@ public ReflectionOptimizer getReflectionOptimizer(Class clazz, Map clazz, Map determineAccessOptimizerSuperClass(Class clazz, Member[] get getterType = ( (Method) getter ).getReturnType(); } - builder = builder.define( - new MethodDescription.InDefinedShape.Latent( - builder.toTypeDescription(), - new MethodDescription.Token( - "get_" + getter.getName(), - Opcodes.ACC_PROTECTED | Opcodes.ACC_STATIC, - TypeDescription.Generic.OfNonGenericType.ForLoadedType.of( - getterType - ), - Collections.singletonList( - TypeDescription.Generic.OfNonGenericType.ForLoadedType.of( - clazz - ) - ) - ) - ) + builder = builder.defineMethod( + "get_" + getter.getName(), + TypeDescription.Generic.OfNonGenericType.ForLoadedType.of( + getterType + ), + Opcodes.ACC_PROTECTED | Opcodes.ACC_STATIC ) + .withParameter( foreignPackageClassInfo.clazz ) .intercept( new Implementation.Simple( new GetFieldOnArgument( @@ -357,24 +348,13 @@ private Class determineAccessOptimizerSuperClass(Class clazz, Member[] get setterType = ( (Method) setter ).getParameterTypes()[0]; } - builder = builder.define( - new MethodDescription.InDefinedShape.Latent( - 
builder.toTypeDescription(), - new MethodDescription.Token( - "set_" + setter.getName(), - Opcodes.ACC_PROTECTED | Opcodes.ACC_STATIC, - TypeDescription.Generic.VOID, - Arrays.asList( - TypeDescription.Generic.OfNonGenericType.ForLoadedType.of( - clazz - ), - TypeDescription.Generic.OfNonGenericType.ForLoadedType.of( - setterType - ) - ) - ) - ) + builder = builder.defineMethod( + "set_" + setter.getName(), + TypeDescription.Generic.VOID, + Opcodes.ACC_PROTECTED | Opcodes.ACC_STATIC ) + .withParameter( foreignPackageClassInfo.clazz ) + .withParameter( setterType ) .intercept( new Implementation.Simple( new SetFieldOnArgument( @@ -556,7 +536,10 @@ public Size apply( ); } methodVisitor.visitInsn( Opcodes.RETURN ); - return new Size( 2, instrumentedMethod.getStackSize() ); + return new Size( + is64BitType( type ) ? 3 : 2, + instrumentedMethod.getStackSize() + ); } private int getLoadOpCode(Class type) { @@ -573,6 +556,10 @@ private int getLoadOpCode(Class type) { } return Opcodes.ALOAD; } + + private boolean is64BitType(Class type) { + return type == long.class || type == double.class; + } } private List createForeignPackageClassInfos(Class clazz) { @@ -780,7 +767,10 @@ else if ( getterMember instanceof Field ) { Opcodes.INVOKESTATIC, Type.getInternalName( foreignPackageMember.getForeignPackageAccessor() ), "get_" + getterMember.getName(), - Type.getMethodDescriptor( Type.getType( type ), Type.getType( clazz ) ), + Type.getMethodDescriptor( + Type.getType( type ), + Type.getType( underlyingMember.getDeclaringClass() ) + ), false ); } @@ -843,17 +833,17 @@ public Size apply( Label nextLabel = new Label(); for ( int index = 0; index < setters.length; index++ ) { final Member setterMember = setters[index]; - if ( enhanced && currentLabel != null ) { + if ( setterMember == EMBEDDED_MEMBER ) { + // The embedded property access does a no-op + continue; + } + if ( currentLabel != null ) { methodVisitor.visitLabel( currentLabel ); 
implementationContext.getFrameGeneration().same( methodVisitor, instrumentedMethod.getParameters().asTypeList() ); } - if ( setterMember == EMBEDDED_MEMBER ) { - // The embedded property access does a no-op - continue; - } // Push entity on stack methodVisitor.visitVarInsn( Opcodes.ALOAD, 1 ); methodVisitor.visitTypeInsn( Opcodes.CHECKCAST, Type.getInternalName( clazz ) ); @@ -975,12 +965,17 @@ else if ( setterMember instanceof Field ) { Opcodes.INVOKESTATIC, Type.getInternalName( foreignPackageMember.getForeignPackageAccessor() ), "set_" + setterMember.getName(), - Type.getMethodDescriptor( Type.getType( void.class ), Type.getType( clazz ), Type.getType( type ) ), + Type.getMethodDescriptor( + Type.getType( void.class ), + Type.getType( foreignPackageMember.getMember().getDeclaringClass() ), + Type.getType( type ) + ), false ); } if ( enhanced ) { final boolean compositeTracker = CompositeTracker.class.isAssignableFrom( type ); + boolean alreadyHasFrame = false; // The composite owner check and setting only makes sense if // * the value type is a composite tracker // * a value subtype can be a composite tracker @@ -1062,6 +1057,7 @@ else if ( setterMember instanceof Field ) { // Clean stack after the if block methodVisitor.visitLabel( compositeTrackerEndLabel ); implementationContext.getFrameGeneration().same(methodVisitor, instrumentedMethod.getParameters().asTypeList()); + alreadyHasFrame = true; } if ( persistentAttributeInterceptable ) { // Load the owner @@ -1126,9 +1122,20 @@ else if ( setterMember instanceof Field ) { // Clean stack after the if block methodVisitor.visitLabel( instanceofEndLabel ); implementationContext.getFrameGeneration().same(methodVisitor, instrumentedMethod.getParameters().asTypeList()); + alreadyHasFrame = true; } - currentLabel = nextLabel; + if ( alreadyHasFrame ) { + // Usually, the currentLabel is visited as well generating a frame, + // but if a frame was already generated, only visit the label here, + // otherwise two frames for 
the same bytecode index are generated, + // which is wrong and will produce an error when the JDK ClassFile API is used + methodVisitor.visitLabel( nextLabel ); + currentLabel = null; + } + else { + currentLabel = nextLabel; + } nextLabel = new Label(); } } @@ -1298,22 +1305,34 @@ private static Constructor findConstructor(Class clazz) { } } - public static class CloningPropertyCall implements Callable { + public static class GetPropertyNames implements ByteCodeAppender { private final String[] propertyNames; - private CloningPropertyCall(String[] propertyNames) { + private GetPropertyNames(String[] propertyNames) { this.propertyNames = propertyNames; } @Override - public String[] call() { - return propertyNames.clone(); + public Size apply( + MethodVisitor methodVisitor, + Implementation.Context implementationContext, + MethodDescription instrumentedMethod) { + methodVisitor.visitLdcInsn( propertyNames.length ); + methodVisitor.visitTypeInsn( Opcodes.ANEWARRAY, Type.getInternalName( String.class ) ); + for ( int i = 0; i < propertyNames.length; i++ ) { + methodVisitor.visitInsn( Opcodes.DUP ); + methodVisitor.visitLdcInsn( i ); + methodVisitor.visitLdcInsn( propertyNames[i] ); + methodVisitor.visitInsn( Opcodes.AASTORE ); + } + methodVisitor.visitInsn( Opcodes.ARETURN ); + return new Size( 4, instrumentedMethod.getStackSize() + 1 ); } } @Override - public Enhancer getEnhancer(EnhancementContext enhancementContext) { + public @Nullable Enhancer getEnhancer(EnhancementContext enhancementContext) { return new EnhancerImpl( enhancementContext, byteBuddyState ); } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/HibernateMethodLookupDispatcher.java b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/HibernateMethodLookupDispatcher.java index 96d5a1651e14..79fd59bae8fa 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/HibernateMethodLookupDispatcher.java +++ 
b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/bytebuddy/HibernateMethodLookupDispatcher.java @@ -6,7 +6,6 @@ */ package org.hibernate.bytecode.internal.bytebuddy; -import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.security.AccessController; import java.security.PrivilegedAction; @@ -99,37 +98,11 @@ static void registerAuthorizedClass(String className) { PrivilegedAction[]>> initializeGetCallerStackAction = new PrivilegedAction[]>>() { @Override public PrivilegedAction[]> run() { - Class stackWalkerClass = null; try { - // JDK 9 introduced the StackWalker - stackWalkerClass = Class.forName( "java.lang.StackWalker" ); + return new StackWalkerGetCallerStackAction(StackWalker.getInstance( StackWalker.Option.RETAIN_CLASS_REFERENCE) ); } - catch (ClassNotFoundException e) { - // ignore, we will deal with that later. - } - - if ( stackWalkerClass != null ) { - // We can use a stack walker - try { - Class optionClass = Class.forName( "java.lang.StackWalker$Option" ); - Object stackWalker = stackWalkerClass.getMethod( "getInstance", optionClass ) - // The first one is RETAIN_CLASS_REFERENCE - .invoke( null, optionClass.getEnumConstants()[0] ); - - Method stackWalkerWalkMethod = stackWalkerClass.getMethod( "walk", Function.class ); - Method stackFrameGetDeclaringClass = Class.forName( "java.lang.StackWalker$StackFrame" ) - .getMethod( "getDeclaringClass" ); - return new StackWalkerGetCallerStackAction( - stackWalker, stackWalkerWalkMethod,stackFrameGetDeclaringClass - ); - } - catch (Throwable e) { - throw new HibernateException( "Unable to initialize the stack walker", e ); - } - } - else { - // We cannot use a stack walker, default to fetching the security manager class context - return new SecurityManagerClassContextGetCallerStackAction(); + catch (Throwable e) { + throw new HibernateException( "Unable to initialize the stack walker", e ); } } }; @@ -165,59 +138,42 @@ private static Class getCallerClass() { 
throw new SecurityException( "Unable to determine the caller class" ); } - /** - * A privileged action that retrieves the caller stack from the security manager class context. - */ - private static class SecurityManagerClassContextGetCallerStackAction extends SecurityManager - implements PrivilegedAction[]> { - @Override - public Class[] run() { - return getClassContext(); - } - } - /** * A privileged action that retrieves the caller stack using a stack walker. */ private static class StackWalkerGetCallerStackAction implements PrivilegedAction[]> { - private final Object stackWalker; - private final Method stackWalkerWalkMethod; - private final Method stackFrameGetDeclaringClass; + private final StackWalker stackWalker; - StackWalkerGetCallerStackAction(Object stackWalker, Method stackWalkerWalkMethod, - Method stackFrameGetDeclaringClass) { + StackWalkerGetCallerStackAction(StackWalker stackWalker) { this.stackWalker = stackWalker; - this.stackWalkerWalkMethod = stackWalkerWalkMethod; - this.stackFrameGetDeclaringClass = stackFrameGetDeclaringClass; } @Override public Class[] run() { try { - return (Class[]) stackWalkerWalkMethod.invoke( stackWalker, stackFrameExtractFunction ); + return stackWalker.walk( stackFrameExtractFunction ); } - catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { + catch (RuntimeException e) { throw new SecurityException( "Unable to determine the caller class", e ); } } - @SuppressWarnings({ "unchecked", "rawtypes" }) - private final Function stackFrameExtractFunction = new Function() { + private final Function, Class[]> stackFrameExtractFunction = new Function<>() { @Override - public Object apply(Stream stream) { + public Class[] apply(Stream stream) { return stream.map( stackFrameGetDeclaringClassFunction ) .limit( MAX_STACK_FRAMES ) .toArray( Class[]::new ); } }; - private final Function> stackFrameGetDeclaringClassFunction = new Function>() { + private final Function> 
stackFrameGetDeclaringClassFunction = new Function<>() { @Override - public Class apply(Object t) { + public Class apply(StackWalker.StackFrame frame) { try { - return (Class) stackFrameGetDeclaringClass.invoke( t ); + return frame.getDeclaringClass(); } - catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { + catch (RuntimeException e) { throw new HibernateException( "Unable to get stack frame declaring class", e ); } } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/none/BytecodeProviderImpl.java b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/none/BytecodeProviderImpl.java index d389f1fd5896..144687422944 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/internal/none/BytecodeProviderImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/internal/none/BytecodeProviderImpl.java @@ -17,6 +17,8 @@ import org.hibernate.cfg.AvailableSettings; import org.hibernate.property.access.spi.PropertyAccess; +import org.checkerframework.checker.nullness.qual.Nullable; + /** * This BytecodeProvider represents the "no-op" enhancer; mostly useful * as an optimisation when not needing any byte code optimisation applied, @@ -44,12 +46,12 @@ public ReflectionOptimizer getReflectionOptimizer( } @Override - public ReflectionOptimizer getReflectionOptimizer(Class clazz, Map propertyAccessMap) { + public @Nullable ReflectionOptimizer getReflectionOptimizer(Class clazz, Map propertyAccessMap) { throw new HibernateException( "Using the ReflectionOptimizer is not possible when the configured BytecodeProvider is 'none'. 
Disable " + AvailableSettings.USE_REFLECTION_OPTIMIZER + " or use a different BytecodeProvider"); } @Override - public Enhancer getEnhancer(EnhancementContext enhancementContext) { + public @Nullable Enhancer getEnhancer(EnhancementContext enhancementContext) { return null; } } diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeEnhancementMetadata.java b/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeEnhancementMetadata.java index deecb83f9ff1..55cfb0e5a74b 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeEnhancementMetadata.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeEnhancementMetadata.java @@ -14,6 +14,8 @@ import org.hibernate.engine.spi.PersistentAttributeInterceptor; import org.hibernate.engine.spi.SharedSessionContractImplementor; +import org.checkerframework.checker.nullness.qual.Nullable; + /** * Encapsulates bytecode enhancement information about a particular entity. * @@ -80,9 +82,9 @@ void injectEnhancedEntityAsProxyInterceptor( * * @throws NotInstrumentedException Thrown if {@link #isEnhancedForLazyLoading()} returns {@code false} */ - LazyAttributeLoadingInterceptor extractInterceptor(Object entity) throws NotInstrumentedException; + @Nullable LazyAttributeLoadingInterceptor extractInterceptor(Object entity) throws NotInstrumentedException; - BytecodeLazyAttributeInterceptor extractLazyInterceptor(Object entity) throws NotInstrumentedException; + @Nullable BytecodeLazyAttributeInterceptor extractLazyInterceptor(Object entity) throws NotInstrumentedException; boolean hasUnFetchedAttributes(Object entity); diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeProvider.java b/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeProvider.java index 3b338646739d..077c9e5869d6 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeProvider.java +++ 
b/hibernate-core/src/main/java/org/hibernate/bytecode/spi/BytecodeProvider.java @@ -11,8 +11,11 @@ import org.hibernate.bytecode.enhance.spi.EnhancementContext; import org.hibernate.bytecode.enhance.spi.Enhancer; import org.hibernate.property.access.spi.PropertyAccess; +import org.hibernate.service.JavaServiceLoadable; import org.hibernate.service.Service; +import org.checkerframework.checker.nullness.qual.Nullable; + /** * Contract for providers of bytecode services to Hibernate. *

    @@ -23,6 +26,7 @@ * * @author Steve Ebersole */ +@JavaServiceLoadable public interface BytecodeProvider extends Service { /** * Retrieve the specific factory for this provider capable of @@ -54,7 +58,7 @@ public interface BytecodeProvider extends Service { * @param propertyAccessMap The ordered property access map * @return The reflection optimization delegate. */ - ReflectionOptimizer getReflectionOptimizer(Class clazz, Map propertyAccessMap); + @Nullable ReflectionOptimizer getReflectionOptimizer(Class clazz, Map propertyAccessMap); /** * Returns a byte code enhancer that implements the enhancements described in the supplied enhancement context. @@ -63,7 +67,7 @@ public interface BytecodeProvider extends Service { * * @return An enhancer to perform byte code manipulations. */ - Enhancer getEnhancer(EnhancementContext enhancementContext); + @Nullable Enhancer getEnhancer(EnhancementContext enhancementContext); /** * Some BytecodeProvider implementations will have classloader specific caching. 
diff --git a/hibernate-core/src/main/java/org/hibernate/bytecode/spi/ClassTransformer.java b/hibernate-core/src/main/java/org/hibernate/bytecode/spi/ClassTransformer.java index b198a11cda4c..49d2ef494a86 100644 --- a/hibernate-core/src/main/java/org/hibernate/bytecode/spi/ClassTransformer.java +++ b/hibernate-core/src/main/java/org/hibernate/bytecode/spi/ClassTransformer.java @@ -9,6 +9,7 @@ import java.security.ProtectionDomain; import jakarta.persistence.spi.TransformerException; +import org.checkerframework.checker.nullness.qual.Nullable; /** * A persistence provider provides an instance of this interface to the @@ -36,9 +37,9 @@ public interface ClassTransformer extends jakarta.persistence.spi.ClassTransform */ @Override byte[] transform( - ClassLoader loader, + @Nullable ClassLoader loader, String className, - Class classBeingRedefined, + @Nullable Class classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws TransformerException; } diff --git a/hibernate-core/src/main/java/org/hibernate/cache/MutableCacheKeyBuilder.java b/hibernate-core/src/main/java/org/hibernate/cache/MutableCacheKeyBuilder.java new file mode 100644 index 000000000000..2cb526fe4f9b --- /dev/null +++ b/hibernate-core/src/main/java/org/hibernate/cache/MutableCacheKeyBuilder.java @@ -0,0 +1,31 @@ +/* + * Hibernate, Relational Persistence for Idiomatic Java + * + * License: GNU Lesser General Public License (LGPL), version 2.1 or later. + * See the lgpl.txt file in the root directory or . + */ +package org.hibernate.cache; + +import java.io.Serializable; + +import org.hibernate.cache.spi.QueryResultsCache; + +/** + * A builder that generates a Serializable Object to be used as a key into the {@linkplain QueryResultsCache + * query results cache}. 
+ */ + +public interface MutableCacheKeyBuilder extends Serializable { + + void addValue(Object value); + + + void addHashCode(int hashCode); + + /** + * creates an Object to be used as a key into the {@linkplain QueryResultsCache + * query results cache}. + */ + Serializable build(); + +} diff --git a/hibernate-core/src/main/java/org/hibernate/cache/internal/BasicCacheKeyImplementation.java b/hibernate-core/src/main/java/org/hibernate/cache/internal/BasicCacheKeyImplementation.java index e241ba34c609..68353db38a64 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/internal/BasicCacheKeyImplementation.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/internal/BasicCacheKeyImplementation.java @@ -7,10 +7,8 @@ package org.hibernate.cache.internal; import java.io.Serializable; -import java.util.Objects; import org.hibernate.Internal; -import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.type.Type; /** @@ -25,7 +23,7 @@ * @since 6.2 */ @Internal -final class BasicCacheKeyImplementation implements Serializable { +public final class BasicCacheKeyImplementation implements Serializable { final Serializable id; private final String entityOrRoleName; @@ -44,11 +42,26 @@ public BasicCacheKeyImplementation( final Serializable disassembledKey, final Type type, final String entityOrRoleName) { - assert disassembledKey != null; + this( disassembledKey, entityOrRoleName, calculateHashCode( originalId, type ) ); + } + + /** + * Being an internal contract the arguments are not being checked. 
+ * @param originalId + * @param disassembledKey this must be the "disassembled" form of an ID + * @param type + * @param entityOrRoleName + */ + @Internal + public BasicCacheKeyImplementation( + final Serializable id, + final String entityOrRoleName, + final int hashCode) { + assert id != null; assert entityOrRoleName != null; - this.id = disassembledKey; + this.id = id; this.entityOrRoleName = entityOrRoleName; - this.hashCode = calculateHashCode( originalId, type ); + this.hashCode = hashCode; } private static int calculateHashCode(Object disassembledKey, Type type) { @@ -59,6 +72,11 @@ public Object getId() { return id; } + @Internal + public String getEntityOrRoleName() { + return entityOrRoleName; + } + @Override public boolean equals(final Object other) { if ( other == null ) { diff --git a/hibernate-core/src/main/java/org/hibernate/cache/internal/CacheKeyImplementation.java b/hibernate-core/src/main/java/org/hibernate/cache/internal/CacheKeyImplementation.java index 2822837c6dbd..1c0569d37c30 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/internal/CacheKeyImplementation.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/internal/CacheKeyImplementation.java @@ -10,7 +10,6 @@ import java.util.Objects; import org.hibernate.Internal; -import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.type.Type; /** @@ -31,7 +30,7 @@ public final class CacheKeyImplementation implements Serializable { private final String tenantId; private final int hashCode; - //because of object alignmnet, we had "free space" in this key: + //because of object alignment, we had "free space" in this key: //this field isn't strictly necessary but convenient: watch for //class layout changes. 
private final boolean requiresDeepEquals; @@ -54,12 +53,31 @@ public CacheKeyImplementation( final Type type, final String entityOrRoleName, final String tenantId) { + this( disassembledKey, entityOrRoleName, tenantId, calculateHashCode( id, type, tenantId ) ); + } + + /** + * Construct a new key for a collection or entity instance. + * Note that an entity name should always be the root entity + * name, not a subclass entity name. + * + * @param id The identifier associated with the cached data + * @param entityOrRoleName The entity or collection-role name. + * @param tenantId The tenant identifier associated with this data. + * @param hashCode the pre-calculated hash code + */ + @Internal + public CacheKeyImplementation( + final Object id, + final String entityOrRoleName, + final String tenantId, + final int hashCode) { assert entityOrRoleName != null; - this.id = disassembledKey; + this.id = id; this.entityOrRoleName = entityOrRoleName; this.tenantId = tenantId; //might actually be null - this.hashCode = calculateHashCode( id, type, tenantId ); - this.requiresDeepEquals = disassembledKey.getClass().isArray(); + this.hashCode = hashCode; + this.requiresDeepEquals = id.getClass().isArray(); } private static int calculateHashCode(Object id, Type type, String tenantId) { @@ -72,6 +90,16 @@ public Object getId() { return id; } + @Internal + public String getEntityOrRoleName() { + return entityOrRoleName; + } + + @Internal + public String getTenantId() { + return tenantId; + } + @Override public boolean equals(Object other) { if ( other == null ) { diff --git a/hibernate-core/src/main/java/org/hibernate/cache/internal/CollectionCacheInvalidator.java b/hibernate-core/src/main/java/org/hibernate/cache/internal/CollectionCacheInvalidator.java index 85d3d189644a..b8d11ee46358 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/internal/CollectionCacheInvalidator.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/internal/CollectionCacheInvalidator.java 
@@ -181,13 +181,14 @@ private void evict(Object id, CollectionPersister collectionPersister, EventSour if ( LOG.isDebugEnabled() ) { LOG.debug( "Evict CollectionRegion " + collectionPersister.getRole() + " for id " + id ); } - AfterTransactionCompletionProcess afterTransactionProcess = new CollectionEvictCacheAction( + CollectionEvictCacheAction evictCacheAction = new CollectionEvictCacheAction( collectionPersister, null, id, session - ).lockCache(); - session.getActionQueue().registerProcess( afterTransactionProcess ); + ); + evictCacheAction.execute(); + session.getActionQueue().registerProcess( evictCacheAction.getAfterTransactionCompletionProcess() ); } //execute the same process as invalidation with collection operations @@ -202,11 +203,8 @@ private static final class CollectionEvictCacheAction extends CollectionAction { @Override public void execute() throws HibernateException { - } - - public AfterTransactionCompletionProcess lockCache() { beforeExecutions(); - return getAfterTransactionCompletionProcess(); + evict(); } } diff --git a/hibernate-core/src/main/java/org/hibernate/cache/internal/DefaultCacheKeysFactory.java b/hibernate-core/src/main/java/org/hibernate/cache/internal/DefaultCacheKeysFactory.java index 99de4b3367b9..dd1c366b18ec 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/internal/DefaultCacheKeysFactory.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/internal/DefaultCacheKeysFactory.java @@ -69,8 +69,16 @@ public static Object staticCreateEntityKey(Object id, EntityPersister persister, } } - public static Object staticCreateNaturalIdKey(Object naturalIdValues, EntityPersister persister, SharedSessionContractImplementor session) { - return new NaturalIdCacheKey( naturalIdValues, persister, session ); + public static Object staticCreateNaturalIdKey( + Object naturalIdValues, + EntityPersister persister, + SharedSessionContractImplementor session) { + NaturalIdCacheKey.NaturalIdCacheKeyBuilder builder = new 
NaturalIdCacheKey.NaturalIdCacheKeyBuilder( + naturalIdValues, + persister, + session + ); + return builder.build(); } public static Object staticGetEntityId(Object cacheKey) { diff --git a/hibernate-core/src/main/java/org/hibernate/cache/internal/NaturalIdCacheKey.java b/hibernate-core/src/main/java/org/hibernate/cache/internal/NaturalIdCacheKey.java index a8e09604a7a1..64da34cc4efd 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/internal/NaturalIdCacheKey.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/internal/NaturalIdCacheKey.java @@ -9,11 +9,14 @@ import java.io.IOException; import java.io.ObjectInputStream; import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; import java.util.Objects; +import org.hibernate.Internal; +import org.hibernate.cache.MutableCacheKeyBuilder; import org.hibernate.engine.spi.SharedSessionContractImplementor; import org.hibernate.internal.util.ValueHolder; -import org.hibernate.metamodel.mapping.NaturalIdMapping; import org.hibernate.persister.entity.EntityPersister; /** @@ -33,18 +36,59 @@ public class NaturalIdCacheKey implements Serializable { // "transient" is important here -- NaturalIdCacheKey needs to be Serializable private transient ValueHolder toString; - public NaturalIdCacheKey(Object naturalIdValues, EntityPersister persister, SharedSessionContractImplementor session) { - this( naturalIdValues, persister, persister.getRootEntityName(), session ); - } + public static class NaturalIdCacheKeyBuilder implements MutableCacheKeyBuilder { - public NaturalIdCacheKey(Object naturalIdValues, EntityPersister persister, String entityName, SharedSessionContractImplementor session) { - this.entityName = entityName; - this.tenantId = session.getTenantIdentifier(); + private final String entityName; + private final String tenantIdentifier; + + private final List values; + private int hashCode; + + public NaturalIdCacheKeyBuilder( + Object naturalIdValues, + EntityPersister persister, 
+ String entityName, + SharedSessionContractImplementor session) { + this.entityName = entityName; + this.tenantIdentifier = session.getTenantIdentifier(); + values = new ArrayList<>(); + persister.getNaturalIdMapping().addToCacheKey( this, naturalIdValues, session ); + } + + public NaturalIdCacheKeyBuilder( + Object naturalIdValues, + EntityPersister persister, + SharedSessionContractImplementor session) { + this( naturalIdValues, persister, persister.getRootEntityName(), session ); + } - final NaturalIdMapping naturalIdMapping = persister.getNaturalIdMapping(); + @Override + public void addValue(Object value) { + values.add( value ); + } + + @Override + public void addHashCode(int hashCode) { + this.hashCode = 37 * this.hashCode + hashCode; + } - this.naturalIdValues = naturalIdMapping.disassemble( naturalIdValues, session ); - this.hashCode = naturalIdMapping.calculateHashCode( naturalIdValues ); + @Override + public NaturalIdCacheKey build() { + return new NaturalIdCacheKey( + values.toArray( new Object[0] ), + entityName, + tenantIdentifier, + hashCode + ); + } + } + + @Internal + public NaturalIdCacheKey(Object naturalIdValues, String entityName, String tenantId, int hashCode) { + this.naturalIdValues = naturalIdValues; + this.entityName = entityName; + this.tenantId = tenantId; + this.hashCode = hashCode; initTransients(); } diff --git a/hibernate-core/src/main/java/org/hibernate/cache/internal/QueryResultsCacheImpl.java b/hibernate-core/src/main/java/org/hibernate/cache/internal/QueryResultsCacheImpl.java index 651bcfe9fc86..ad5e04f08a25 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/internal/QueryResultsCacheImpl.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/internal/QueryResultsCacheImpl.java @@ -18,7 +18,6 @@ import org.hibernate.cache.spi.TimestampsCache; import org.hibernate.engine.spi.SharedSessionContractImplementor; -import static org.hibernate.cache.spi.SecondLevelCacheLogger.DEBUG_ENABLED; import static 
org.hibernate.cache.spi.SecondLevelCacheLogger.L2CACHE_LOGGER; /** @@ -51,7 +50,7 @@ public boolean put( final QueryKey key, final List results, final SharedSessionContractImplementor session) throws HibernateException { - if ( DEBUG_ENABLED ) { + if ( L2CACHE_LOGGER.isDebugEnabled() ) { L2CACHE_LOGGER.debugf( "Caching query results in region: %s; timestamp=%s", cacheRegion.getName(), session.getCacheTransactionSynchronization().getCachingTimestamp() ); } @@ -80,26 +79,27 @@ public List get( final QueryKey key, final Set spaces, final SharedSessionContractImplementor session) throws HibernateException { - if ( DEBUG_ENABLED ) { + final boolean loggerDebugEnabled = L2CACHE_LOGGER.isDebugEnabled(); + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debugf( "Checking cached query results in region: %s", cacheRegion.getName() ); } final CacheItem cacheItem = getCachedData( key, session ); if ( cacheItem == null ) { - if ( DEBUG_ENABLED ) { + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debug( "Query results were not found in cache" ); } return null; } if ( !timestampsCache.isUpToDate( spaces, cacheItem.timestamp, session ) ) { - if ( DEBUG_ENABLED ) { + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debug( "Cached query results were not up-to-date" ); } return null; } - if ( DEBUG_ENABLED ) { + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debug( "Returning cached query results" ); } @@ -111,26 +111,27 @@ public List get( final QueryKey key, final String[] spaces, final SharedSessionContractImplementor session) throws HibernateException { - if ( DEBUG_ENABLED ) { + final boolean loggerDebugEnabled = L2CACHE_LOGGER.isDebugEnabled(); + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debugf( "Checking cached query results in region: %s", cacheRegion.getName() ); } final CacheItem cacheItem = getCachedData( key, session ); if ( cacheItem == null ) { - if ( DEBUG_ENABLED ) { + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debug( "Query results were not found in cache" ); } return null; } if ( 
!timestampsCache.isUpToDate( spaces, cacheItem.timestamp, session ) ) { - if ( DEBUG_ENABLED ) { + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debug( "Cached query results were not up-to-date" ); } return null; } - if ( DEBUG_ENABLED ) { + if ( loggerDebugEnabled ) { L2CACHE_LOGGER.debug( "Returning cached query results" ); } diff --git a/hibernate-core/src/main/java/org/hibernate/cache/internal/SimpleCacheKeysFactory.java b/hibernate-core/src/main/java/org/hibernate/cache/internal/SimpleCacheKeysFactory.java index f5e8345e0315..5f8813f58b26 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/internal/SimpleCacheKeysFactory.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/internal/SimpleCacheKeysFactory.java @@ -32,9 +32,18 @@ public Object createEntityKey(Object id, EntityPersister persister, SessionFacto } @Override - public Object createNaturalIdKey(Object naturalIdValues, EntityPersister persister, SharedSessionContractImplementor session) { + public Object createNaturalIdKey( + Object naturalIdValues, + EntityPersister persister, + SharedSessionContractImplementor session) { // natural ids always need to be wrapped - return new NaturalIdCacheKey(naturalIdValues, persister, null, session); + NaturalIdCacheKey.NaturalIdCacheKeyBuilder builder = new NaturalIdCacheKey.NaturalIdCacheKeyBuilder( + naturalIdValues, + persister, + null, + session + ); + return builder.build(); } @Override diff --git a/hibernate-core/src/main/java/org/hibernate/cache/spi/QueryKey.java b/hibernate-core/src/main/java/org/hibernate/cache/spi/QueryKey.java index 86f10ecb615a..f1704fe139c1 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/spi/QueryKey.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/spi/QueryKey.java @@ -8,6 +8,7 @@ import java.io.IOException; import java.io.Serializable; +import java.util.Arrays; import java.util.Objects; import java.util.Set; @@ -65,7 +66,7 @@ public static QueryKey from( private final Integer firstRow; private 
final Integer maxRows; private final String tenantIdentifier; - private final Set enabledFilterNames; + private final String[] enabledFilterNames; /** * For performance reasons, the hashCode is cached; however, it is marked transient so that it can be @@ -85,7 +86,7 @@ public QueryKey( this.firstRow = firstRow; this.maxRows = maxRows; this.tenantIdentifier = tenantIdentifier; - this.enabledFilterNames = enabledFilterNames; + this.enabledFilterNames = enabledFilterNames.toArray( String[]::new ); this.hashCode = generateHashCode(); } @@ -110,7 +111,7 @@ private int generateHashCode() { // result = 37 * result + ( maxRows==null ? 0 : maxRows ); result = 37 * result + ( tenantIdentifier==null ? 0 : tenantIdentifier.hashCode() ); result = 37 * result + parameterBindingsMemento.hashCode(); - result = 37 * result + ( enabledFilterNames == null ? 0 : enabledFilterNames.hashCode() ); + result = 37 * result + Arrays.hashCode( enabledFilterNames ); return result; } @@ -146,7 +147,7 @@ public boolean equals(Object other) { return false; } - if ( ! Objects.equals( enabledFilterNames, that.enabledFilterNames ) ) { + if ( ! 
Arrays.equals( enabledFilterNames, that.enabledFilterNames ) ) { return false; } diff --git a/hibernate-core/src/main/java/org/hibernate/cache/spi/SecondLevelCacheLogger.java b/hibernate-core/src/main/java/org/hibernate/cache/spi/SecondLevelCacheLogger.java index 596ac47e5e57..d564c57726d9 100644 --- a/hibernate-core/src/main/java/org/hibernate/cache/spi/SecondLevelCacheLogger.java +++ b/hibernate-core/src/main/java/org/hibernate/cache/spi/SecondLevelCacheLogger.java @@ -33,9 +33,6 @@ public interface SecondLevelCacheLogger extends BasicLogger { SecondLevelCacheLogger L2CACHE_LOGGER = Logger.getMessageLogger( SecondLevelCacheLogger.class, LOGGER_NAME ); - boolean DEBUG_ENABLED = L2CACHE_LOGGER.isDebugEnabled(); - boolean TRACE_ENABLED = L2CACHE_LOGGER.isTraceEnabled(); - int NAMESPACE = 90001000; @LogMessage(level = WARN) diff --git a/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java b/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java index d2a86f3a87cf..ec7d2a907df9 100644 --- a/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java +++ b/hibernate-core/src/main/java/org/hibernate/cfg/AvailableSettings.java @@ -12,6 +12,7 @@ import org.hibernate.CustomEntityDirtinessStrategy; import org.hibernate.Incubating; import org.hibernate.Interceptor; +import org.hibernate.Remove; import org.hibernate.SessionFactoryObserver; import org.hibernate.boot.registry.selector.spi.StrategySelector; import org.hibernate.cache.internal.NoCachingRegionFactory; @@ -28,6 +29,9 @@ import org.hibernate.query.sqm.mutation.internal.temptable.PersistentTableStrategy; import org.hibernate.resource.jdbc.spi.PhysicalConnectionHandlingMode; import org.hibernate.resource.jdbc.spi.StatementInspector; +import org.hibernate.sql.ast.spi.ParameterMarkerStrategy; +import org.hibernate.type.WrapperArrayHandling; +import org.hibernate.type.format.FormatMapper; import jakarta.persistence.criteria.CriteriaDelete; import 
jakarta.persistence.criteria.CriteriaQuery; @@ -308,7 +312,7 @@ public interface AvailableSettings { * Setting that controls whether we seek out JPA "static metamodel" classes * and populate them, either:
      *
    • - * enabled -Do the population + * enabled - Do the population *
    • *
    • * disabled - Do not do the population @@ -1078,7 +1082,10 @@ public interface AvailableSettings { String BATCH_VERSIONED_DATA = "hibernate.jdbc.batch_versioned_data"; /** - * Specifies a {@linkplain java.util.TimeZone time zone} that should be passed to + * Specifies the {@linkplain java.util.TimeZone time zone} to use in the JDBC driver, + * which is supposed to match the database timezone. + *

      + * This is the timezone that will be passed to * {@link java.sql.PreparedStatement#setTimestamp(int, java.sql.Timestamp, java.util.Calendar)} * {@link java.sql.PreparedStatement#setTime(int, java.sql.Time, java.util.Calendar)}, * {@link java.sql.ResultSet#getTimestamp(int, Calendar)}, and @@ -1420,7 +1427,7 @@ public interface AvailableSettings { *

    • a {@link Class} implementing {@link org.hibernate.cache.spi.CacheKeysFactory}, *
    • the name of a class implementing {@link org.hibernate.cache.spi.CacheKeysFactory}, *
    • {@code "default"} as a short name for {@link org.hibernate.cache.internal.DefaultCacheKeysFactory}, or - *
    • '{@code "simple"} as a short name for {@link org.hibernate.cache.internal.SimpleCacheKeysFactory}. + *
    • {@code "simple"} as a short name for {@link org.hibernate.cache.internal.SimpleCacheKeysFactory}. *
    * * @since 5.2 @@ -1638,68 +1645,35 @@ public interface AvailableSettings { String HBM2DDL_CONNECTION = "javax.persistence.schema-generation-connection"; /** - * Specifies whether schema generation commands for schema creation are to be determined based - * on object/relational mapping metadata, DDL scripts, or a combination of the two. See - * {@link org.hibernate.tool.schema.SourceType} for the list of legal values. - *

    - * If no value is specified, a default is inferred as follows: - *

      - *
    • if source scripts are specified via {@value #HBM2DDL_CREATE_SCRIPT_SOURCE}, then - * {@link org.hibernate.tool.schema.SourceType#SCRIPT "script"} is assumed, or - *
    • otherwise, {@link org.hibernate.tool.schema.SourceType#SCRIPT "metadata"} is - * assumed. - *
    - * + * @deprecated Migrate to {@link #JAKARTA_HBM2DDL_CREATE_SOURCE} instead * @see org.hibernate.tool.schema.SourceType */ + @Deprecated String HBM2DDL_CREATE_SOURCE = "javax.persistence.schema-generation.create-source"; /** - * Specifies whether schema generation commands for schema dropping are to be determined - * based on object/relational mapping metadata, DDL scripts, or a combination of the two. - * See {@link org.hibernate.tool.schema.SourceType} for the list of legal values. - *

    - * If no value is specified, a default is inferred as follows: - *

      - *
    • if source scripts are specified via {@value #HBM2DDL_DROP_SCRIPT_SOURCE}, then - * {@link org.hibernate.tool.schema.SourceType#SCRIPT "script"} is assumed, or - *
    • otherwise, {@link org.hibernate.tool.schema.SourceType#SCRIPT "metadata"} is - * assumed. - *
    - * + * @deprecated Migrate to {@link #JAKARTA_HBM2DDL_DROP_SOURCE}. * @see org.hibernate.tool.schema.SourceType */ + @Deprecated String HBM2DDL_DROP_SOURCE = "javax.persistence.schema-generation.drop-source"; /** - * Specifies the CREATE script file as either a {@link java.io.Reader} configured for reading - * the DDL script file or a string designating a file {@link java.net.URL} for the DDL script. - *

    - * Hibernate historically also accepted {@link #HBM2DDL_IMPORT_FILES} for a similar purpose. - * This setting is now preferred. - * - * @see #HBM2DDL_CREATE_SOURCE - * @see #HBM2DDL_IMPORT_FILES + * @deprecated Migrate to {@link #JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE} */ + @Deprecated String HBM2DDL_CREATE_SCRIPT_SOURCE = "javax.persistence.schema-generation.create-script-source"; /** - * Specifies the DROP script file as either a {@link java.io.Reader} configured for reading - * the DDL script file or a string designating a file {@link java.net.URL} for the DDL script. - * - * @see #HBM2DDL_DROP_SOURCE + * @deprecated Migrate to {@link #JAKARTA_HBM2DDL_DROP_SCRIPT_SOURCE} */ + @Deprecated String HBM2DDL_DROP_SCRIPT_SOURCE = "javax.persistence.schema-generation.drop-script-source"; /** - * For cases where the {@value #HBM2DDL_SCRIPTS_ACTION} value indicates that schema creation - * commands should be written to DDL script file, {@value #HBM2DDL_SCRIPTS_CREATE_TARGET} - * specifies either a {@link java.io.Writer} configured for output of the DDL script or a - * string specifying the file URL for the DDL script. - * - * @see #HBM2DDL_SCRIPTS_ACTION + * @deprecated Migrate to {@link #JAKARTA_HBM2DDL_SCRIPTS_CREATE_TARGET} */ - @SuppressWarnings("JavaDoc") + @Deprecated String HBM2DDL_SCRIPTS_CREATE_TARGET = "javax.persistence.schema-generation.scripts.create-target"; /** @@ -1715,14 +1689,9 @@ public interface AvailableSettings { String HBM2DDL_SCRIPTS_CREATE_APPEND = "hibernate.hbm2ddl.schema-generation.script.append"; /** - * For cases where the {@value #HBM2DDL_SCRIPTS_ACTION} value indicates that schema drop commands - * should be written to DDL script file, {@value #HBM2DDL_SCRIPTS_DROP_TARGET} specifies either a - * {@link java.io.Writer} configured for output of the DDL script or a string specifying the file - * URL for the DDL script. 
- * - * @see #HBM2DDL_SCRIPTS_ACTION + * @deprecated Migrate to {@link #JAKARTA_HBM2DDL_SCRIPTS_DROP_TARGET} */ - @SuppressWarnings("JavaDoc") + @Deprecated String HBM2DDL_SCRIPTS_DROP_TARGET = "javax.persistence.schema-generation.scripts.drop-target"; /** @@ -1735,7 +1704,7 @@ public interface AvailableSettings { *

    * The default value is {@code /import.sql}. *

    - * The JPA-standard setting {@link #HBM2DDL_CREATE_SCRIPT_SOURCE} is now preferred. + * The JPA-standard setting {@link #JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE} is now preferred. */ String HBM2DDL_IMPORT_FILES = "hibernate.hbm2ddl.import_files"; @@ -1748,8 +1717,8 @@ public interface AvailableSettings { /** * The {@link org.hibernate.tool.schema.spi.SqlScriptCommandExtractor} implementation - * to use for parsing source/import files specified by {@link #HBM2DDL_CREATE_SCRIPT_SOURCE}, - * {@link #HBM2DDL_DROP_SCRIPT_SOURCE} or {@link #HBM2DDL_IMPORT_FILES}. Either: + * to use for parsing source/import files specified by {@link #JAKARTA_HBM2DDL_CREATE_SCRIPT_SOURCE}, + * {@link #JAKARTA_HBM2DDL_DROP_SCRIPT_SOURCE} or {@link #HBM2DDL_IMPORT_FILES}. Either: *