Compare commits

..

2 Commits

Author SHA1 Message Date
Berthold Stoeger
1c547fed34 Don't "untranslate" cylinder names
As far as I can see there are no translation strings for the
cylinder names, so there is no point in translating them back.

Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2024-03-30 14:05:21 +01:00
Berthold Stoeger
45a91ad176 desktop: unglobalize ComboBox-models
The combo-boxes (cylinder type, weightsystem, etc.) were controlled
by global models. Keeping these models up-to-date was very cumbersome
and buggy.

Create a new model every time a combobox is opened. Ultimately it
might even be better to create a copy of the strings and switch
to a simple QStringListModel. Set data in the core directly rather
than via the models.

The result is much simpler and easier to handle.

Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2024-03-30 14:05:21 +01:00
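
The second commit above replaces globally maintained combobox models with models that are rebuilt on demand. As a rough illustration of that idea only (a minimal sketch with hypothetical names, not the actual Subsurface code), a combobox can fill a QStringListModel from the core data each time its popup opens:

```
// Minimal sketch of the "create a new model when the combobox opens" idea.
// CylinderTypeComboBox and currentCylinderTypeNames() are hypothetical;
// only the Qt classes (QComboBox, QStringListModel) are real API.
#include <QComboBox>
#include <QStringList>
#include <QStringListModel>

// Placeholder for reading the current cylinder type names from the core data.
static QStringList currentCylinderTypeNames()
{
	return { "AL80", "D12 232 bar", "HP117" };
}

class CylinderTypeComboBox : public QComboBox {
public:
	explicit CylinderTypeComboBox(QWidget *parent = nullptr)
		: QComboBox(parent), m_model(new QStringListModel(this))
	{
		setModel(m_model); // the combobox is the model's parent and owns it
	}

	void showPopup() override
	{
		// Refresh the list from the core each time the popup opens,
		// so no global model has to be kept up to date.
		m_model->setStringList(currentCylinderTypeNames());
		QComboBox::showPopup();
	}

private:
	QStringListModel *m_model;
};
```

Edits made through such a combobox would then be written straight to the core data, with the next popup rebuilding the list, rather than keeping a long-lived global model in sync.
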
235 changed files with 5881 additions and 5954 deletions


@ -1,56 +0,0 @@
name: Manage the Subsurface CICD versioning
inputs:
no-increment:
description: 'Only get the current version, do not increment it even for push events (Caution: not actually a boolean)'
default: false
nightly-builds-secret:
description: The secret to access the nightly builds repository
default: ''
outputs:
version:
description: The long form version number
value: ${{ steps.version_number.outputs.version }}
buildnr:
description: The build number
value: ${{ steps.version_number.outputs.buildnr }}
runs:
using: composite
steps:
- name: atomically create or retrieve the build number and assemble release notes for a push (i.e. merging of a pull request)
if: github.event_name == 'push' && inputs.no-increment == 'false'
env:
NIGHTLY_BUILDS_SECRET: ${{ inputs.nightly-builds-secret }}
shell: bash
run: |
if [ -z "$NIGHTLY_BUILDS_SECRET" ]; then
echo "Need to supply the secret for the nightly-builds repository to increment the version number, aborting."
exit 1
fi
scripts/get-atomic-buildnr.sh $GITHUB_SHA $NIGHTLY_BUILDS_SECRET "CICD-release"
- name: retrieve the current version number in all other cases
if: github.event_name != 'push' || inputs.no-increment != 'false'
env:
PULL_REQUEST_BRANCH: ${{ github.event.pull_request.head.ref }}
shell: bash
run: |
echo "pull-request-$PULL_REQUEST_BRANCH" > latest-subsurface-buildnumber-extension
- name: store version number for the build
id: version_number
env:
PULL_REQUEST_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
shell: bash
run: |
git config --global --add safe.directory $GITHUB_WORKSPACE
# For a pull request we need the information from the pull request branch
# and not from the merge branch on the pull request
git checkout $PULL_REQUEST_HEAD_SHA
version=$(scripts/get-version.sh)
echo "version=$version" >> $GITHUB_OUTPUT
buildnr=$(scripts/get-version.sh 1)
echo "buildnr=$buildnr" >> $GITHUB_OUTPUT
git checkout $GITHUB_SHA


@ -15,17 +15,17 @@ jobs:
VERSION: ${{ '5.15.2' }} # the version number here is based on the Qt version; the third digit is the rev of the docker image
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v1
- name: Build the name for the docker image
id: build_name
run: |
v=$VERSION
b=$GITHUB_REF # -BRANCH suffix, unless the branch is master
v=${{ env.VERSION }}
b=${{ github.ref }} # -BRANCH suffix, unless the branch is master
b=${b/refs\/heads\//}
b=${b,,} # the name needs to be all lower case
if [ $b = "master" ] ; then b="" ; else b="-$b" ; fi
echo "NAME=$GITHUB_REPOSITORY_OWNER/android-build${b}:${v}" >> $GITHUB_OUTPUT
echo "NAME=subsurface/android-build${b}:${v}" >> $GITHUB_OUTPUT
- name: Build and Publish Linux Docker image to Dockerhub
uses: elgohr/Publish-Docker-Github-Action@v5


@ -1,5 +1,4 @@
name: Android
on:
push:
paths-ignore:
@ -12,10 +11,12 @@ on:
branches:
- master
jobs:
build:
env:
BUILD_ROOT: ${{ github.workspace }}/..
KEYSTORE_FILE: ${{ github.workspace }}/../subsurface.keystore
jobs:
buildAndroid:
runs-on: ubuntu-latest
container:
image: docker://subsurface/android-build:5.15.2
@ -23,33 +24,32 @@ jobs:
steps:
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
- name: set the version information
- name: atomically create or retrieve the build number and assemble release notes
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
if: github.event_name == 'push'
run: |
bash scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
version=$(cat release-version)
echo "version=$version" >> $GITHUB_OUTPUT
- name: store dummy version and build number for non-push build runs
if: github.event_name != 'push'
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-pull-request" > latest-subsurface-buildnumber-extension
- name: set up the keystore
if: github.event_name == 'push'
env:
ANDROID_KEYSTORE_BASE64: ${{ secrets.ANDROID_KEYSTORE_BASE64 }}
run: |
echo "$ANDROID_KEYSTORE_BASE64" | base64 -d > $KEYSTORE_FILE
echo "${{ secrets.ANDROID_KEYSTORE_BASE64 }}" | base64 -d > $KEYSTORE_FILE
- name: run build
id: build
env:
KEYSTORE_PASSWORD: pass:${{ secrets.ANDROID_KEYSTORE_PASSWORD }}
KEYSTORE_ALIAS: ${{ secrets.ANDROID_KEYSTORE_ALIAS }}
BUILDNR: ${{ steps.version_number.outputs.buildnr }}
run: |
# this is rather awkward, but it allows us to use the preinstalled
# Android and Qt versions with relative paths
cd ..
cd $BUILD_ROOT
ln -s /android/5.15.* .
ln -s /android/build-tools .
ln -s /android/cmdline-tools .
@ -62,25 +62,17 @@ jobs:
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory $GITHUB_WORKSPACE/libdivecomputer
# get the build number via curl so this works both for a pull request as well as a push
BUILDNR=$(curl -q https://raw.githubusercontent.com/subsurface/nightly-builds/main/latest-subsurface-buildnumber)
export OUTPUT_DIR="$GITHUB_WORKSPACE"
bash -x ./subsurface/packaging/android/qmake-build.sh -buildnr $BUILDNR
- name: delete the keystore
if: github.event_name == 'push'
run: |
rm $KEYSTORE_FILE
- name: publish pull request artifacts
if: github.event_name == 'pull_request'
uses: actions/upload-artifact@v4
with:
name: Subsurface-Android-${{ steps.version_number.outputs.version }}
path: Subsurface-mobile-*.apk
export KEYSTORE_FILE="$KEYSTORE_FILE"
export KEYSTORE_PASSWORD="pass:${{ secrets.ANDROID_KEYSTORE_PASSWORD }}"
export KEYSTORE_ALIAS="${{ secrets.ANDROID_KEYSTORE_ALIAS }}"
bash -x ./subsurface/packaging/android/qmake-build.sh -buildnr ${BUILDNR}
# only publish a 'release' on push events (those include merging a PR)
- name: upload binaries
if: github.event_name == 'push'
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ steps.version_number.outputs.version }}
repository: ${{ github.repository_owner }}/nightly-builds
@ -89,3 +81,8 @@ jobs:
fail_on_unmatched_files: true
files: |
Subsurface-mobile-${{ steps.version_number.outputs.version }}.apk
- name: delete the keystore
if: github.event_name == 'push'
run: |
rm $KEYSTORE_FILE


@ -1,24 +0,0 @@
name: Add artifact links to pull request
on:
workflow_run:
workflows: ["Ubuntu 16.04 / Qt 5.15-- for AppImage", "Mac", "Windows", "Android", "iOS"]
types: [completed]
jobs:
artifacts-url-comments:
name: Add artifact links to PR and issues
runs-on: ubuntu-22.04
steps:
- name: Add artifact links to PR and issues
if: github.event.workflow_run.event == 'pull_request'
uses: tonyhallett/artifacts-url-comments@v1.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
prefix: "**Artifacts:**"
suffix: "_**WARNING:** Use at your own risk._"
format: name
addTo: pull
errorNoArtifacts: false


@ -25,19 +25,20 @@ jobs:
matrix:
# Override automatic language detection by changing the below list
# Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python']
language: ['c-cpp', 'javascript-typescript']
language: ['cpp', 'javascript']
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2
- name: get container ready for build
run: |
sudo apt-get update
sudo apt-get install -y -q \
sudo apt-get install -y -q --force-yes \
autoconf automake cmake g++ git libcrypto++-dev libcurl4-gnutls-dev \
libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
libqt5webkit5-dev libsqlite3-dev libssh2-1-dev libssl-dev libssl-dev \
@ -50,7 +51,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@ -59,11 +60,13 @@ jobs:
# queries: ./path/to/local/query, your-org/your-repo/queries@main
- name: Build
env:
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
cd ..
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory $GITHUB_WORKSPACE/libdivecomputer
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
bash -e -x subsurface/scripts/build.sh -desktop -build-with-webkit
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v2


@ -1,5 +1,4 @@
name: Coverity Scan Linux Qt 5.9
on:
schedule:
- cron: '0 18 * * *' # Daily at 18:00 UTC
@ -11,11 +10,14 @@ jobs:
image: ubuntu:22.04
steps:
- name: checkout sources
uses: actions/checkout@v1
- name: add build dependencies
run: |
apt-get update
apt-get dist-upgrade -y
DEBIAN_FRONTEND=noninteractive apt-get install -y -q \
apt-get upgrade -y
DEBIAN_FRONTEND=noninteractive apt-get install -y -q --force-yes \
wget curl \
autoconf automake cmake g++ git libcrypto++-dev libcurl4-gnutls-dev \
libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
@ -27,22 +29,12 @@ jobs:
qtpositioning5-dev qtscript5-dev qttools5-dev qttools5-dev-tools \
qtquickcontrols2-5-dev libbluetooth-dev libmtp-dev
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
- name: configure environment
env:
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory $GITHUB_WORKSPACE/libdivecomputer
- name: get the version information
id: version_number
uses: ./.github/actions/manage-version
with:
no-increment: true
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
- name: run coverity scan
uses: vapier/coverity-scan-action@v1
@ -52,5 +44,5 @@ jobs:
email: glance@acc.umu.se
command: subsurface/scripts/build.sh -desktop -build-with-webkit
working-directory: ${{ github.workspace }}/..
version: ${{ steps.version_number.outputs.version }}
version: $(/scripts/get-version)
description: Automatic scan on github actions


@ -26,9 +26,6 @@ jobs:
- name: Checkout Sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
- name: Process the Documentation
id: process_documentation


@ -11,32 +11,30 @@ jobs:
setup-build:
name: Submit build to Fedora COPR
# this seems backwards, but we want to run under Fedora, and GitHub doesn't support that
container: fedora:latest
runs-on: ubuntu-latest
container:
image: fedora:latest
steps:
- name: Check out sources
uses: actions/checkout@v1
- name: Setup build dependencies in the Fedora container
run: |
dnf -y install @development-tools @rpm-development-tools
dnf -y install copr-cli make
- name: Check out sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
- name: setup git
run: |
git config --global --add safe.directory /__w/subsurface/subsurface
git config --global --add safe.directory /__w/subsurface/subsurface/libdivecomputer
- name: set the version information
- name: atomically create or retrieve the build number
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
if: github.event_name == 'push'
run: |
bash scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
version=$(cat release-version)
echo "version=$version" >> $GITHUB_OUTPUT
- name: Setup API token for copr-cli
env:
@ -55,5 +53,5 @@ jobs:
- name: run the copr build script
run: |
cd ..
bash -x subsurface/packaging/copr/make-package.sh $GITHUB_REF_NAME
bash -x subsurface/packaging/copr/make-package.sh ${{ github.ref_name }}


@ -1,5 +1,4 @@
name: iOS
on:
push:
paths-ignore:
@ -13,49 +12,37 @@ on:
- master
jobs:
build:
iOSBuild:
runs-on: macOS-11
steps:
- name: switch to Xcode 11
run: sudo xcode-select -s "/Applications/Xcode_11.7.app"
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
uses: actions/checkout@v1
- name: setup Homebrew
run: brew install autoconf automake libtool pkg-config
- name: checkout Qt resources
uses: actions/checkout@v4
with:
repository: subsurface/qt-ios
ref: main
path: qt-ios
- name: set our Qt build
run: |
env
curl -L --output Qt-5.14.1-ios.tar.xz https://f002.backblazeb2.com/file/Subsurface-Travis/Qt-5.14.1-ios.tar.xz
mkdir -p $HOME/Qt
xzcat Qt-5.14.1-ios.tar.xz | tar -x -C $HOME/Qt -f -
- name: set the version information
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
- name: store dummy version and build number for test build
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-test-build" > latest-subsurface-buildnumber-extension
- name: build Subsurface-mobile for iOS
env:
VERSION: ${{ steps.version_number.outputs.version }}
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
cd ..
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory $GITHUB_WORKSPACE/libdivecomputer
export IOS_QT=$GITHUB_WORKSPACE/qt-ios
cd ${SUBSURFACE_REPO_PATH}/..
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
ln -s $HOME/Qt Qt
echo "build for simulator"
bash -x $GITHUB_WORKSPACE/packaging/ios/build.sh -simulator
# We need this in order to be able to access the file and publish it
mv build-Subsurface-mobile-Qt_5_14_1_for_iOS-Release/Release-iphonesimulator/Subsurface-mobile.app $GITHUB_WORKSPACE/Subsurface-mobile-$VERSION.app
- name: publish artifacts
uses: actions/upload-artifact@v4
with:
name: Subsurface-iOS-${{ steps.version_number.outputs.version }}
path: Subsurface-mobile-*.app


@ -0,0 +1,55 @@
name: Ubuntu 18.04 / Qt 5.9--
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
buildOnBionic:
runs-on: ubuntu-18.04
container:
image: ubuntu:18.04 # yes, this looks redundant, but something is messed up with their Ubuntu image that causes our builds to fail
steps:
- name: checkout sources
uses: actions/checkout@v1
- name: add build dependencies
run: |
apt update
apt install -y \
autoconf automake cmake g++ git libcrypto++-dev libcurl4-gnutls-dev \
libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
libqt5webkit5-dev libsqlite3-dev libssh2-1-dev libssl-dev libssl-dev \
libtool libusb-1.0-0-dev libxml2-dev libxslt1-dev libzip-dev make \
pkg-config qml-module-qtlocation qml-module-qtpositioning \
qml-module-qtquick2 qt5-default qt5-qmake qtchooser qtconnectivity5-dev \
qtdeclarative5-dev qtdeclarative5-private-dev qtlocation5-dev \
qtpositioning5-dev qtscript5-dev qttools5-dev qttools5-dev-tools \
qtquickcontrols2-5-dev xvfb libbluetooth-dev libmtp-dev
- name: store dummy version and build number for pull request
if: github.event_name == 'pull_request'
run: |
echo "6.0.100" > latest-subsurface-buildnumber
- name: build Subsurface
env:
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
cd ..
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
bash -x subsurface/scripts/build.sh -desktop -build-with-webkit
- name: test desktop build
run: |
# and now run the tests - with Qt 5.9 we can only run the desktop flavor
echo "------------------------------------"
echo "run tests"
cd build/tests
# xvfb-run --auto-servernum ./TestGitStorage -v2
xvfb-run --auto-servernum make check


@ -1,19 +0,0 @@
name: Debian trixie / Qt 5.15--
on:
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
do-build-test:
uses: ./.github/workflows/linux-debian-generic.yml
with:
container-image: debian:trixie


@ -0,0 +1,39 @@
name: Linux Qt 5.12 Docker Image CI
#on:
# push:
# paths:
# - scripts/docker/trusty-qt512/Dockerfile
# - .github/workflows/linux-docker*
jobs:
trusty-qt512:
runs-on: ubuntu-latest
env:
VERSION: ${{ '1.0' }} # 'official' images should have a dot-zero version
steps:
- uses: actions/checkout@v1
- name: Get our pre-reqs
run: |
cd scripts/docker/trusty-qt512
bash getpackages.sh
- name: set env
run: |
v=${{ env.VERSION }}
b=${{ github.ref }} # -BRANCH suffix, unless the branch is master
b=${b/refs\/heads\//}
b=${b,,} # the name needs to be all lower case
if [ $b = "master" ] ; then b="" ; else b="-$b" ; fi
echo "::set-env name=NAME::subsurface/trusty-qt512${b}:${v}"
- name: Build and Publish Linux Docker image to Dockerhub
uses: elgohr/Publish-Docker-Github-Action@master
with:
name: ${{ env.NAME }}
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
dockerfile: 'Dockerfile'
workdir: './scripts/docker/trusty-qt512/'


@ -1,5 +1,4 @@
name: Fedora 35 / Qt 6--
on:
push:
paths-ignore:
@ -13,12 +12,15 @@ on:
- master
jobs:
build:
buildFedoraQt6:
runs-on: ubuntu-latest
container:
image: fedora:35
steps:
- name: checkout sources
uses: actions/checkout@v1
- name: get container ready for build
run: |
echo "--------------------------------------------------------------"
@ -35,27 +37,22 @@ jobs:
bluez-libs-devel libgit2-devel libzip-devel libmtp-devel \
xorg-x11-server-Xvfb
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
- name: set the version information
id: version_number
uses: ./.github/actions/manage-version
with:
no-increment: true
- name: store dummy version and build number for test build
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-test-build" > latest-subsurface-buildnumber-extension
- name: build Subsurface
env:
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
echo "--------------------------------------------------------------"
echo "building desktop"
# now build for the desktop version (without WebKit)
cd ..
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory $GITHUB_WORKSPACE/libdivecomputer
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
git config --global --get-all safe.directory
bash -e -x subsurface/scripts/build.sh -desktop -build-with-qt6

.github/workflows/linux-focal-5.12.yml

@ -0,0 +1,85 @@
name: Ubuntu 20.04 / Qt 5.12--
on:
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
buildUbuntuFocal:
runs-on: ubuntu-latest
container:
image: ubuntu:20.04
steps:
- name: checkout sources
uses: actions/checkout@v1
- name: get container ready for build
run: |
echo "--------------------------------------------------------------"
echo "update distro and install dependencies"
apt-get update
apt-get upgrade -y
DEBIAN_FRONTEND=noninteractive apt-get install -y -q --force-yes \
autoconf automake cmake g++ git libcrypto++-dev libcurl4-gnutls-dev \
libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
libqt5webkit5-dev libsqlite3-dev libssh2-1-dev libssl-dev libssl-dev \
libtool libusb-1.0-0-dev libxml2-dev libxslt1-dev libzip-dev make \
pkg-config qml-module-qtlocation qml-module-qtpositioning \
qml-module-qtquick2 qt5-qmake qtchooser qtconnectivity5-dev \
qtdeclarative5-dev qtdeclarative5-private-dev qtlocation5-dev \
qtpositioning5-dev qtscript5-dev qttools5-dev qttools5-dev-tools \
qtquickcontrols2-5-dev xvfb libbluetooth-dev libmtp-dev
- name: store dummy version and build number for test build
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-test-build" > latest-subsurface-buildnumber-extension
- name: build Subsurface-mobile
env:
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
echo "--------------------------------------------------------------"
echo "building mobile"
git config --global user.email "ci@subsurface-divelog.org"
git config --global user.name "Subsurface CI"
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
cd ..
bash -e -x subsurface/scripts/build.sh -mobile
- name: test mobile build
run: |
echo "--------------------------------------------------------------"
echo "running tests for mobile"
cd build-mobile/tests
# xvfb-run --auto-servernum ./TestGitStorage -v2
xvfb-run --auto-servernum make check
- name: build Subsurface
run: |
echo "--------------------------------------------------------------"
echo "building desktop"
# now build for the desktop version (including WebKit)
cd ..
bash -e -x subsurface/scripts/build.sh -desktop -build-with-webkit
- name: test desktop build
run: |
echo "--------------------------------------------------------------"
echo "running tests for desktop"
cd build/tests
# xvfb-run --auto-servernum ./TestGitStorage -v2
xvfb-run --auto-servernum make check


@ -1,27 +1,36 @@
name: Generic workflow for Debian and derivatives
name: Ubuntu 22.04 / Qt 5.15--
on:
workflow_call:
inputs:
container-image:
required: true
type: string
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
build:
buildUbuntuJammy:
runs-on: ubuntu-latest
container:
image: ${{ inputs.container-image }}
image: ubuntu:22.04
steps:
- name: checkout sources
uses: actions/checkout@v1
- name: get container ready for build
env:
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
echo "--------------------------------------------------------------"
echo "update distro and install dependencies"
apt-get update
apt-get dist-upgrade -y
DEBIAN_FRONTEND=noninteractive apt-get install -y -q \
apt-get upgrade -y
DEBIAN_FRONTEND=noninteractive apt-get install -y -q --force-yes \
autoconf automake cmake g++ git libcrypto++-dev libcurl4-gnutls-dev \
libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
libqt5webkit5-dev libsqlite3-dev libssh2-1-dev libssl-dev libssl-dev \
@ -35,20 +44,13 @@ jobs:
git config --global user.email "ci@subsurface-divelog.org"
git config --global user.name "Subsurface CI"
git config --global --add safe.directory $GITHUB_WORKSPACE
git config --global --add safe.directory $GITHUB_WORKSPACE/libdivecomputer
# needs git from the previous step
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
- name: set the version information
id: version_number
uses: ./.github/actions/manage-version
with:
no-increment: true
- name: store dummy version and build number for test build
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-test-build" > latest-subsurface-buildnumber-extension
- name: build subsurface-mobile
run: |


@ -19,16 +19,16 @@ jobs:
timeout-minutes: 60
steps:
- name: Check out code
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
# Needed for version determination to work
fetch-depth: 0
submodules: recursive
- name: set the version information
- name: atomically create or retrieve the build number
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
if: github.event_name == 'push'
run: |
bash scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
- name: store dummy version and build number for pull request
if: github.event_name == 'pull_request'
@ -48,11 +48,11 @@ jobs:
/snap/bin/lxc profile device add default ccache disk source=${HOME}/.ccache/ path=/root/.ccache
# Patch snapcraft.yaml to enable ccache
patch -p1 < .github/workflows/scripts/linux-snap.patch
patch -p1 < .github/workflows/linux-snap.patch
# Find common base between master and HEAD to use as cache key.
git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules origin master
echo "key=$( git merge-base origin/master $GITHUB_SHA )" >> $GITHUB_OUTPUT
echo "key=$( git merge-base origin/master ${{ github.sha }} )" >> $GITHUB_OUTPUT
- name: CCache
uses: actions/cache@v3
@ -73,7 +73,7 @@ jobs:
- name: Upload the snap
if: github.event_name == 'push'
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: ${{ steps.build-snap.outputs.snap-name }}
path: ${{ steps.build-snap.outputs.snap-path }}

.github/workflows/linux-trusty-5.12.yml

@ -0,0 +1,77 @@
name: Ubuntu 14.04 / Qt 5.12 for AppImage--
on:
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
buildAppImage:
runs-on: ubuntu-latest
container:
image: docker://subsurface/trusty-qt512:1.1
steps:
- name: checkout sources
uses: actions/checkout@v1
- name: atomically create or retrieve the build number and assemble release notes
id: version_number
if: github.event_name == 'push'
run: |
bash ./scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
version=$(cat release-version)
echo "version=$version" >> $GITHUB_OUTPUT
- name: store dummy version and build number for pull request
if: github.event_name == 'pull_request'
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-pull-request" > latest-subsurface-buildnumber-extension
- name: run build
env:
SUBSURFACE_REPO_PATH: ${{ github.workspace }}
run: |
cd ..
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
rm -rf /install-root/include/libdivecomputer
bash -x subsurface/.github/workflows/scripts/linux-in-container-build.sh
- name: prepare PR artifacts
if: github.event_name == 'pull_request'
run: |
mkdir -p Linux-artifacts
mv Subsurface.AppImage Linux-artifacts
- name: PR artifacts
if: github.event_name == 'pull_request'
uses: actions/upload-artifact@v3
with:
name: Linux-artifacts
path: Linux-artifacts
- name: prepare release artifacts
if: github.event_name == 'push'
run: |
mv Subsurface.AppImage Subsurface-v${{ steps.version_number.outputs.version }}.AppImage
# only publish a 'release' on push events (those include merging a PR)
- name: upload binaries
if: github.event_name == 'push'
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ steps.version_number.outputs.version }}
repository: ${{ github.repository_owner }}/nightly-builds
token: ${{ secrets.NIGHTLY_BUILDS }}
prerelease: false
fail_on_unmatched_files: true
files: |
./Subsurface*.AppImage


@ -1,149 +0,0 @@
name: Ubuntu 16.04 / Qt 5.15-- for AppImage
on:
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
container:
image: ubuntu:16.04
steps:
- name: get container ready for build
run: |
echo "--------------------------------------------------------------"
echo "update distro and install dependencies"
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y -q \
software-properties-common
add-apt-repository -y ppa:savoury1/qt-5-15
add-apt-repository -y ppa:savoury1/kde-5-80
add-apt-repository -y ppa:savoury1/gpg
add-apt-repository -y ppa:savoury1/ffmpeg4
add-apt-repository -y ppa:savoury1/vlc3
add-apt-repository -y ppa:savoury1/gcc-9
add-apt-repository -y ppa:savoury1/display
add-apt-repository -y ppa:savoury1/apt-xenial
add-apt-repository -y ppa:savoury1/gtk-xenial
add-apt-repository -y ppa:savoury1/qt-xenial
add-apt-repository -y ppa:savoury1/kde-xenial
add-apt-repository -y ppa:savoury1/backports
add-apt-repository -y ppa:savoury1/build-tools
apt-get update
apt-get dist-upgrade -y
DEBIAN_FRONTEND=noninteractive apt-get install -y -q \
autoconf automake cmake g++ g++-9 git libcrypto++-dev libcurl4-gnutls-dev \
libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
libqt5webkit5-dev libsqlite3-dev libssh2-1-dev libssl-dev libssl-dev \
libtool libusb-1.0-0-dev libxml2-dev libxslt1-dev libzip-dev make \
pkg-config qml-module-qtlocation qml-module-qtpositioning \
qml-module-qtquick2 qt5-qmake qtchooser qtconnectivity5-dev \
qtdeclarative5-dev qtdeclarative5-private-dev qtlocation5-dev \
qtpositioning5-dev qtscript5-dev qttools5-dev qttools5-dev-tools \
qtquickcontrols2-5-dev xvfb libbluetooth-dev libmtp-dev liblzma-dev \
curl
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 60 \
--slave /usr/bin/g++ g++ /usr/bin/g++-9
- name: checkout sources
# We cannot update this as glibc on 16.04 is too old for node 20.
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: recursive
- name: set the version information
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
- name: build Subsurface
run: |
echo "--------------------------------------------------------------"
echo "building desktop"
# now build the appimage
cd ..
bash -e -x subsurface/scripts/build.sh -desktop -create-appdir -build-with-webkit
- name: test desktop build
run: |
echo "--------------------------------------------------------------"
echo "running tests for desktop"
cd build/tests
# xvfb-run --auto-servernum ./TestGitStorage -v2
xvfb-run --auto-servernum make check
- name: build appimage
env:
VERSION: ${{ steps.version_number.outputs.version }}
run: |
echo "--------------------------------------------------------------"
echo "assembling AppImage"
export QT_PLUGIN_PATH=$QT_ROOT/plugins
export QT_QPA_PLATFORM_PLUGIN_PATH=$QT_ROOT/plugins
export QT_DEBUG_PLUGINS=1
cd ..
# set up the appdir
mkdir -p appdir/usr/plugins/
# mv googlemaps plugins into place
mv appdir/usr/usr/lib/x86_64-linux-gnu/qt5/plugins/* appdir/usr/plugins # the usr/usr is not a typo, that's where it ends up
rm -rf appdir/usr/home/ appdir/usr/include/ appdir/usr/share/man/ # No need to ship developer and man files as part of the AppImage
rm -rf appdir/usr/usr appdir/usr/lib/x86_64-linux-gnu/cmake appdir/usr/lib/pkgconfig
cp /usr/lib/x86_64-linux-gnu/libssl.so.1.1 appdir/usr/lib/
cp /usr/lib/x86_64-linux-gnu/libcrypto.so.1.1 appdir/usr/lib/
# get the linuxdeployqt tool and run it to collect the libraries
curl -L -O "https://github.com/probonopd/linuxdeployqt/releases/download/7/linuxdeployqt-7-x86_64.AppImage"
chmod a+x linuxdeployqt*.AppImage
unset QTDIR
unset QT_PLUGIN_PATH
unset LD_LIBRARY_PATH
./linuxdeployqt*.AppImage --appimage-extract-and-run ./appdir/usr/share/applications/*.desktop -exclude-libs=libdbus-1.so.3 -bundle-non-qt-libs -qmldir=./subsurface/stats -qmldir=./subsurface/map-widget/ -verbose=2
# create the AppImage
./linuxdeployqt*.AppImage --appimage-extract-and-run ./appdir/usr/share/applications/*.desktop -exclude-libs=libdbus-1.so.3 -appimage -qmldir=./subsurface/stats -qmldir=./subsurface/map-widget/ -verbose=2
# copy AppImage to the calling VM
# with GitHub Actions the $GITHUB_WORKSPACE directory is the current working directory at the start of a step
cp Subsurface*.AppImage* $GITHUB_WORKSPACE/Subsurface-$VERSION.AppImage
- name: PR artifacts
if: github.event_name == 'pull_request'
# We cannot update this as glibc on 16.04 is too old for node 20.
uses: actions/upload-artifact@v3
with:
name: Subsurface-Linux-AppImage-${{ steps.version_number.outputs.version }}
path: Subsurface-*.AppImage
compression-level: 0
# only publish a 'release' on push events (those include merging a PR)
- name: upload binaries
if: github.event_name == 'push'
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ steps.version_number.outputs.version }}
repository: ${{ github.repository_owner }}/nightly-builds
token: ${{ secrets.NIGHTLY_BUILDS }}
prerelease: false
fail_on_unmatched_files: true
files: |
./Subsurface-*.AppImage


@ -1,19 +0,0 @@
name: Ubuntu 20.04 / Qt 5.12--
on:
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
do-build-test:
uses: ./.github/workflows/linux-debian-generic.yml
with:
container-image: ubuntu:20.04


@ -1,19 +0,0 @@
name: Ubuntu 22.04 / Qt 5.15--
on:
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
do-build-test:
uses: ./.github/workflows/linux-debian-generic.yml
with:
container-image: ubuntu:22.04


@ -1,19 +0,0 @@
name: Ubuntu 24.04 / Qt 5.15--
on:
push:
paths-ignore:
- scripts/docker/**
branches:
- master
pull_request:
paths-ignore:
- scripts/docker/**
branches:
- master
jobs:
do-build-test:
uses: ./.github/workflows/linux-debian-generic.yml
with:
container-image: ubuntu:24.04


@ -1,5 +1,4 @@
name: Mac
on:
push:
paths-ignore:
@ -12,38 +11,38 @@ on:
branches:
- master
jobs:
build:
buildMac:
runs-on: macOS-11
steps:
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
uses: actions/checkout@v1
- name: atomically create or retrieve the build number and assemble release notes
id: version_number
if: github.event_name == 'push'
run: |
bash scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
version=$(cat release-version)
echo "version=$version" >> $GITHUB_OUTPUT
- name: store dummy version and build number for pull request
if: github.event_name == 'pull_request'
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-pull-request" > latest-subsurface-buildnumber-extension
- name: setup Homebrew
run: brew install hidapi libxslt libjpg libmtp create-dmg confuse
- name: checkout Qt resources
uses: actions/checkout@v4
with:
repository: subsurface/qt-mac
ref: main
path: qt-mac
- name: set the version information
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
- name: set our Qt build
run: |
curl --output ssrf-Qt-5.15.2-mac.tar.xz https://f002.backblazeb2.com/file/Subsurface-Travis/ssrf-Qt5.15.2.tar.xz
tar -xJf ssrf-Qt-5.15.2-mac.tar.xz
- name: build Subsurface
id: build
run: |
cd ${GITHUB_WORKSPACE}/..
export QT_ROOT=${GITHUB_WORKSPACE}/qt-mac/Qt5.15.13
export QT_ROOT=${GITHUB_WORKSPACE}/Qt5.15.2/5.15.2/clang_64
export QT_QPA_PLATFORM_PLUGIN_PATH=$QT_ROOT/plugins
export PATH=$QT_ROOT/bin:$PATH
export CMAKE_PREFIX_PATH=$QT_ROOT/lib/cmake
@ -59,18 +58,10 @@ jobs:
echo "Created $IMG"
echo "dmg=$IMG" >> $GITHUB_OUTPUT
- name: publish pull request artifacts
if: github.event_name == 'pull_request'
uses: actions/upload-artifact@v4
with:
name: Subsurface-MacOS-${{ steps.version_number.outputs.version }}
path: ${{ steps.build.outputs.dmg }}
compression-level: 0
# only publish a 'release' on push events (those include merging a PR)
- name: upload binaries
if: github.event_name == 'push'
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ steps.version_number.outputs.version }}
repository: ${{ github.repository_owner }}/nightly-builds


@ -1,5 +1,4 @@
name: Post Release Notes
name: Post Release
on:
push:
paths-ignore:
@ -7,35 +6,29 @@ on:
branches:
- master
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs:
postRelease:
runs-on: ubuntu-latest
steps:
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
- name: set the version information
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
# since we are running this step on a pull request, we will skip build numbers in releases
- name: assemble release notes
env:
EVENT_HEAD_COMMIT_ID: ${{ github.event.head_commit.id }}
# Required because we are using the GitHub CLI in 'create-releasenotes.sh'
GH_TOKEN: ${{ github.token }}
- name: atomically create or retrieve the build number and assemble release notes
id: version_number
run: |
scripts/create-releasenotes.sh $EVENT_HEAD_COMMIT_ID
bash -x ./scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
bash scripts/create-releasenotes.sh ${{ github.event.head_commit.id }}
version=$(cat release-version)
echo "version=$version" >> $GITHUB_OUTPUT
# add a file containing the release title so it can be picked up and listed on the release page on our web server
- name: publish release
if: github.event_name == 'push'
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ steps.version_number.outputs.version }}
repository: ${{ github.repository_owner }}/nightly-builds


@ -23,11 +23,13 @@ logger.setLevel(logging.INFO)
APPLICATION = "subsurface-ci"
LAUNCHPAD = "production"
RELEASE = "bionic"
TEAM = "subsurface"
SOURCE_NAME = "subsurface"
SNAPS = {
"subsurface": {
"stable": {"recipe": "subsurface-stable"},
"candidate": {"recipe": "subsurface-candidate"},
},
}


@ -0,0 +1,58 @@
#!/bin/bash
set -x
set -e
# this gets executed by the GitHub Action when building an AppImage for Linux
# inside of the trusty-qt512 container
export PATH=$QT_ROOT/bin:$PATH # Make sure correct qmake is found on the $PATH for linuxdeployqt
export CMAKE_PREFIX_PATH=$QT_ROOT/lib/cmake
# echo "--------------------------------------------------------------"
# echo "install missing packages"
# apt install -y libbluetooth-dev libmtp-dev
# the container currently has things under / that need to be under /__w/subsurface/subsurface instead
cp -a /appdir /__w/subsurface/
cp -a /install-root /__w/subsurface/
echo "--------------------------------------------------------------"
echo "building desktop"
# now build our AppImage
bash -e -x subsurface/scripts/build.sh -desktop -create-appdir -build-with-webkit -quick
echo "--------------------------------------------------------------"
echo "assembling AppImage"
export QT_PLUGIN_PATH=$QT_ROOT/plugins
export QT_QPA_PLATFORM_PLUGIN_PATH=$QT_ROOT/plugins
export QT_DEBUG_PLUGINS=1
# set up the appdir
mkdir -p appdir/usr/plugins/
# mv googlemaps plugins into place
mv appdir/usr/usr/local/Qt/5.*/gcc_64/plugins/* appdir/usr/plugins # the usr/usr is not a typo, that's where it ends up
rm -rf appdir/usr/home/ appdir/usr/include/ appdir/usr/share/man/ # No need to ship developer and man files as part of the AppImage
rm -rf appdir/usr/usr appdir/usr/lib/cmake appdir/usr/lib/pkgconfig
cp /ssllibs/libssl.so appdir/usr/lib/libssl.so.1.1
cp /ssllibs/libcrypto.so appdir/usr/lib/libcrypto.so.1.1
# get the linuxdeployqt tool and run it to collect the libraries
curl -L -O "https://github.com/probonopd/linuxdeployqt/releases/download/7/linuxdeployqt-7-x86_64.AppImage"
chmod a+x linuxdeployqt*.AppImage
unset QTDIR
unset QT_PLUGIN_PATH
unset LD_LIBRARY_PATH
./linuxdeployqt*.AppImage --appimage-extract-and-run ./appdir/usr/share/applications/*.desktop -exclude-libs=libdbus-1.so.3 -bundle-non-qt-libs -qmldir=./subsurface/stats -qmldir=./subsurface/map-widget/ -verbose=2
# create the AppImage
export VERSION=$(cd subsurface/scripts ; ./get-version) # linuxdeployqt uses this for naming the file
./linuxdeployqt*.AppImage --appimage-extract-and-run ./appdir/usr/share/applications/*.desktop -exclude-libs=libdbus-1.so.3 -appimage -qmldir=./subsurface/stats -qmldir=./subsurface/map-widget/ -verbose=2
# copy AppImage to the calling VM
# with GitHub Actions the /${GITHUB_WORKSPACE} directory is the current working directory at the start of a step
cp Subsurface*.AppImage* /${GITHUB_WORKSPACE}/Subsurface.AppImage
ls -l /${GITHUB_WORKSPACE}/Subsurface.AppImage


@ -19,14 +19,14 @@ bash -ex ../subsurface/packaging/windows/mxe-based-build.sh installer
# the strange two step move is in order to get predictable names to use
# in the publish step of the GitHub Action
mv subsurface/subsurface.exe* ${OUTPUT_DIR}/
mv subsurface/subsurface.exe* ${GITHUB_WORKSPACE}/
fullname=$(cd subsurface ; ls subsurface-*.exe)
mv subsurface/"$fullname" ${OUTPUT_DIR}/"${fullname%.exe}-installer.exe"
mv subsurface/"$fullname" ${GITHUB_WORKSPACE}/"${fullname%.exe}-installer.exe"
bash -ex ../subsurface/packaging/windows/smtk2ssrf-mxe-build.sh -a -i
# the strange two step move is in order to get predictable names to use
# in the publish step of the GitHub Action
mv smtk-import/smtk2ssrf.exe ${OUTPUT_DIR}/
mv smtk-import/smtk2ssrf.exe ${GITHUB_WORKSPACE}/
fullname=$(cd smtk-import ; ls smtk2ssrf*.exe)
mv smtk-import/smtk2ssrf*.exe ${OUTPUT_DIR}/"${fullname%.exe}-installer.exe"
mv smtk-import/smtk2ssrf*.exe ${GITHUB_WORKSPACE}/"${fullname%.exe}-installer.exe"


@ -15,16 +15,13 @@ jobs:
steps:
- name: Check out sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
uses: actions/checkout@v1
- name: set the version information
- name: atomically create or retrieve the build number
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
if: github.event_name == 'push'
run: |
bash scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
- name: Setup build dependencies
run: |
@ -51,5 +48,5 @@ jobs:
- name: run the launchpad make-package script
run: |
cd ..
bash -x subsurface/packaging/ubuntu/make-package.sh $GITHUB_REF_NAME
bash -x subsurface/packaging/ubuntu/make-package.sh ${{ github.ref_name }}


@ -16,17 +16,17 @@ jobs:
mxe_sha: 'c0bfefc57a00fdf6cb5278263e21a478e47b0bf5'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v1
- name: Build the name for the docker image
id: build_name
run: |
v=$VERSION
b=$GITHUB_REF # -BRANCH suffix, unless the branch is master
v=${{ env.VERSION }}
b=${{ github.ref }} # -BRANCH suffix, unless the branch is master
b=${b/refs\/heads\//}
b=${b,,} # the name needs to be all lower case
if [ $b = "master" ] ; then b="" ; else b="-$b" ; fi
echo "NAME=$GITHUB_REPOSITORY_OWNER/mxe-build${b}:${v}" >> $GITHUB_OUTPUT
echo "NAME=${{ github.repository_owner }}/mxe-build${b}:${v}" >> $GITHUB_OUTPUT
- name: Build and Publish Linux Docker image to Dockerhub
uses: elgohr/Publish-Docker-Github-Action@v5


@ -1,5 +1,4 @@
name: Windows
on:
push:
paths-ignore:
@ -13,23 +12,28 @@ on:
- master
jobs:
build:
buildWindows:
runs-on: ubuntu-latest
container:
image: docker://subsurface/mxe-build:3.1.0
steps:
- name: checkout sources
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
uses: actions/checkout@v1
- name: set the version information
- name: atomically create or retrieve the build number and assemble release notes
id: version_number
uses: ./.github/actions/manage-version
with:
nightly-builds-secret: ${{ secrets.NIGHTLY_BUILDS }}
if: github.event_name == 'push'
run: |
bash scripts/get-atomic-buildnr.sh ${{ github.sha }} ${{ secrets.NIGHTLY_BUILDS }} "CICD-release"
version=$(cat release-version)
echo "version=$version" >> $GITHUB_OUTPUT
- name: store dummy version and build number for pull request
if: github.event_name == 'pull_request'
run: |
echo "100" > latest-subsurface-buildnumber
echo "CICD-pull-request" > latest-subsurface-buildnumber-extension
- name: get other dependencies
env:
@ -40,28 +44,18 @@ jobs:
git config --global --add safe.directory ${SUBSURFACE_REPO_PATH}/libdivecomputer
cd /win
ln -s /__w/subsurface/subsurface .
bash -x subsurface/packaging/windows/container-prep.sh 2>&1 | tee pre-build.log
bash -x subsurface/.github/workflows/scripts/windows-container-prep.sh 2>&1 | tee pre-build.log
- name: run build
run: |
export OUTPUT_DIR="$GITHUB_WORKSPACE"
cd /win
bash -x subsurface/packaging/windows/in-container-build.sh 2>&1 | tee build.log
bash -x subsurface/.github/workflows/scripts/windows-in-container-build.sh 2>&1 | tee build.log
grep "Built target installer" build.log
- name: publish pull request artifacts
if: github.event_name == 'pull_request'
uses: actions/upload-artifact@v4
with:
name: Subsurface-Windows-${{ steps.version_number.outputs.version }}
path: |
subsurface*.exe*
smtk2ssrf*.exe
# only publish a 'release' on push events (those include merging a PR)
- name: upload binaries
if: github.event_name == 'push'
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
with:
tag_name: v${{ steps.version_number.outputs.version }}
repository: ${{ github.repository_owner }}/nightly-builds

.gitignore

@ -49,4 +49,3 @@ appdata/subsurface.appdata.xml
android-mobile/Roboto-Regular.ttf
gh_release_notes.md
release_content_title.txt
/output/


@ -1,4 +1,3 @@
statistics: show proper dates in January
desktop: add country to the fields indexed for full text search
import: update libdivecomputer version, add support for the Scubapro G3 / Luna and Shearwater Tern
desktop: add a button linking to the 'Contribute' page


@ -124,8 +124,8 @@ if (SUBSURFACE_ASAN_BUILD)
endif()
# every compiler understands -Wall
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror=format")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror=format")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
# by default optimize with -O2 even for debug builds
set (GCC_OPTIMIZATION_FLAGS "-O2" CACHE STRING "GCC optimization flags")
@ -320,7 +320,7 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
endif()
elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
execute_process(
COMMAND bash scripts/get-version.sh
COMMAND bash scripts/get-version
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE SSRF_VERSION_STRING
OUTPUT_STRIP_TRAILING_WHITESPACE


@ -135,7 +135,7 @@ msgid ""
"mailto:subsurface@subsurface-divelog.org[our mailing list] and report bugs "
"at https://github.com/Subsurface/subsurface/issues[our bugtracker]. "
"For instructions on how to build the software and (if needed) its "
"dependencies please consult the INSTALL.md file included with the source code."
"dependencies please consult the INSTALL file included with the source code."
msgstr ""
#. type: Plain text


@ -175,7 +175,7 @@ msgid ""
"an email to mailto:subsurface@subsurface-divelog.org[our mailing list] and "
"report bugs at https://github.com/Subsurface-divelog/subsurface/issues[our "
"bugtracker]. For instructions on how to build the software and (if needed) "
"its dependencies please consult the INSTALL.md file included with the source "
"its dependencies please consult the INSTALL file included with the source "
"code."
msgstr ""
"Ce manuel explique comment utiliser le programme _Subsurface_. Pour "
@ -184,7 +184,7 @@ msgstr ""
"pouvez envoyer un e-mail sur mailto:subsurface@subsurface-divelog.org[notre "
"liste de diffusion] et rapportez les bogues sur http://trac.hohndel."
"org[notre bugtracker]. Pour des instructions de compilation du logiciel et "
"(si besoin) de ses dépendances, merci de consulter le fichier INSTALL.md inclus "
"(si besoin) de ses dépendances, merci de consulter le fichier INSTALL inclus "
"dans les sources logicielles."
#. type: Plain text


@ -460,7 +460,7 @@ the software, consult the <em>Downloads</em> page on the
Please discuss issues with this program by sending an email to
<a href="mailto:subsurface@subsurface-divelog.org">our mailing list</a> and report bugs at
<a href="https://github.com/Subsurface/subsurface/issues">our bugtracker</a>. For instructions on how to build the
software and (if needed) its dependencies please consult the INSTALL.md file
software and (if needed) its dependencies please consult the INSTALL file
included with the source code.</p></div>
<div class="paragraph"><p><strong>Audience</strong>: Recreational Scuba Divers, Free Divers, Tec Divers, Professional
Divers</p></div>


@ -34,7 +34,7 @@ https://subsurface-divelog.org/[_Subsurface_ web site].
Please discuss issues with this program by sending an email to
mailto:subsurface@subsurface-divelog.org[our mailing list] and report bugs at
https://github.com/Subsurface/subsurface/issues[our bugtracker]. For instructions on how to build the
software and (if needed) its dependencies please consult the INSTALL.md file
software and (if needed) its dependencies please consult the INSTALL file
included with the source code.
*Audience*: Recreational Scuba Divers, Free Divers, Tec Divers, Professional


@ -517,7 +517,7 @@ web</a>. Por favor, comenta los problemas que tengas con este programa enviando
mail a <a href="mailto:subsurface@subsurface-divelog.org">nuestra lista de correo</a> e informa de
fallos en <a href="https://github.com/Subsurface/subsurface/issues">nuestro bugtracker</a>.
Para instrucciones acerca de como compilar el software y (en caso necesario)
sus dependencias, por favor, consulta el archivo INSTALL.md incluido con el código
sus dependencias, por favor, consulta el archivo INSTALL incluido con el código
fuente.</p></div>
<div class="paragraph"><p><strong>Audiencia</strong>: Buceadores recreativos, Buceadores en apnea, Buceadores técnicos,
Buceadores profesionales.</p></div>


@ -61,7 +61,7 @@ web]. Por favor, comenta los problemas que tengas con este programa enviando un
mail a mailto:subsurface@subsurface-divelog.org[nuestra lista de correo] e informa de
fallos en https://github.com/Subsurface/subsurface/issues[nuestro bugtracker].
Para instrucciones acerca de como compilar el software y (en caso necesario)
sus dependencias, por favor, consulta el archivo INSTALL.md incluido con el código
sus dependencias, por favor, consulta el archivo INSTALL incluido con el código
fuente.
*Audiencia*: Buceadores recreativos, Buceadores en apnea, Buceadores técnicos,


@ -526,7 +526,7 @@ problème, vous pouvez envoyer un e-mail sur
<a href="mailto:subsurface@subsurface-divelog.org">notre liste de diffusion</a> et
rapportez les bogues sur <a href="http://trac.hohndel.org">notre bugtracker</a>. Pour
des instructions de compilation du logiciel et (si besoin) de ses
dépendances, merci de consulter le fichier INSTALL.md inclus dans les sources
dépendances, merci de consulter le fichier INSTALL inclus dans les sources
logicielles.</p></div>
<div class="paragraph"><p><strong>Public</strong> : Plongeurs loisirs, apnéistes, plongeurs Tek et plongeurs
professionnels</p></div>


@ -61,7 +61,7 @@ problème, vous pouvez envoyer un e-mail sur
mailto:subsurface@subsurface-divelog.org[notre liste de diffusion] et
rapportez les bogues sur http://trac.hohndel.org[notre bugtracker]. Pour
des instructions de compilation du logiciel et (si besoin) de ses
dépendances, merci de consulter le fichier INSTALL.md inclus dans les sources
dépendances, merci de consulter le fichier INSTALL inclus dans les sources
logicielles.
*Public* : Plongeurs loisirs, apnéistes, plongeurs Tek et plongeurs


@ -516,7 +516,7 @@ het programma kunnen bij de ontwikkelaars gemeld worden via email op
<a href="mailto:subsurface@subsurface-divelog.org">onze mailinglijst</a>. Fouten kunnen
ook gemeld worden op <a href="https://github.com/Subsurface/subsurface/issues">onze bugtracker</a>.
Instructies hoe <em>Subsurface</em> zelf te compileren vanuit de broncode staan ook op
onze website en in het INSTALL.md bestand in de broncode.</p></div>
onze website en in het INSTALL bestand in de broncode.</p></div>
<div class="paragraph"><p><strong>Doelgroep</strong>: Recreatieve duikers, Tec duikers, Apneu duikers,
Professionele duikers.</p></div>
<div id="toc">


@ -59,7 +59,7 @@ het programma kunnen bij de ontwikkelaars gemeld worden via email op
mailto:subsurface@subsurface-divelog.org[onze mailinglijst]. Fouten kunnen
ook gemeld worden op https://github.com/Subsurface/subsurface/issues[onze bugtracker].
Instructies hoe _Subsurface_ zelf te compileren vanuit de broncode staan ook op
onze website en in het INSTALL.md bestand in de broncode.
onze website en in het INSTALL bestand in de broncode.
*Doelgroep*: Recreatieve duikers, Tec duikers, Apneu duikers,
Professionele duikers.


@ -1,4 +1,5 @@
# Building Subsurface from Source
Building Subsurface from Source
===============================
Subsurface uses quite a few open source libraries and frameworks to do its
job. The most important ones include libdivecomputer, Qt, libxml2, libxslt,
@ -12,27 +13,23 @@ Below are instructions for building Subsurface
- iOS (cross-building)
## Getting Subsurface source
Getting Subsurface source
-------------------------
You can get the sources to the latest development version from our git
repository:
```
git clone http://github.com/Subsurface/subsurface.git
cd subsurface
git submodule init # this will give you our flavor of libdivecomputer
```
You keep it updated by doing:
```
git checkout master
git pull -r
git submodule update
```
### Our flavor of libdivecomputer
Our flavor of libdivecomputer
-----------------------------
Subsurface requires its own flavor of libdivecomputer, which is included
above as a git submodule
@ -40,7 +37,7 @@ above as git submodule
The branches won't have a pretty history and will include ugly merges,
but they should always allow a fast forward pull that tracks what we
believe developers should build against. All our patches are contained
in the `Subsurface-DS9` branch.
in the "Subsurface-DS9" branch.
This should allow distros to see which patches we have applied on top of
upstream. They will receive force pushes as we rebase to newer versions of
@ -56,7 +53,8 @@ Subsurface or trying to understand what we have done relative to their
respective upstreams.
### Getting Qt5
Getting Qt5
-----------
We use Qt5 in order to only maintain one UI across platforms.
@ -76,41 +74,36 @@ significantly reduced flexibility.
As of this writing, there is thankfully a thirdparty offline installer still
available:
```
pip3 install aqtinstall
aqt install -O <Qt Location> 5.15.2 mac desktop
```
(or whatever version / OS you need). This installer is surprisingly fast
and seems well maintained - note that we don't use this for Windows as
that is completely built from source using MXE.
In order to use this Qt installation, simply add it to your PATH:
```
PATH=<Qt Location>/<version>/<type>/bin:$PATH
```
QtWebKit is needed if you want to print, but it is no longer part of Qt5,
so you need to download and compile it yourself. If you just want to test
without printing support, omit this step.
```
git clone -b 5.212 https://github.com/qt/qtwebkit
mkdir -p qtwebkit/WebKitBuild/Release
cd qtwebkit/WebKitBuild/Release
cmake -DPORT=Qt -DCMAKE_BUILD_TYPE=Release -DQt5_DIR=/<Qt Location>/<version>/<type>/lib/cmake/Qt5 ../..
make install
```
### Other third party library dependencies
Other third party library dependencies
--------------------------------------
In order for our cloud storage to be fully functional you need
libgit2 0.26 or newer.
### cmake build system
cmake build system
------------------
Our main build system is based on cmake. But qmake is needed
for the googlemaps plugin and the iOS build.
@ -121,35 +114,32 @@ distribution (see build instructions).
## Build options for Subsurface
Build options for Subsurface
----------------------------
The following options are recognised when passed to cmake:
`-DCMAKE_BUILD_TYPE=Release` create a release build
`-DCMAKE_BUILD_TYPE=Debug` create a debug build
-DCMAKE_BUILD_TYPE=Release create a release build
-DCMAKE_BUILD_TYPE=Debug create a debug build
The Makefile that was created using cmake can be forced into a much more
verbose mode by calling
```
make VERBOSE=1
```
Many more variables are supported, the easiest way to interact with them is
to call
```
ccmake .
```
in your build directory.
### Building the development version of Subsurface under Linux
Building the development version of Subsurface under Linux
----------------------------------------------------------
On Fedora you need
```
sudo dnf install autoconf automake bluez-libs-devel cmake gcc-c++ git \
libcurl-devel libsqlite3x-devel libssh2-devel libtool libudev-devel \
libusbx-devel libxml2-devel libxslt-devel make \
@ -157,12 +147,10 @@ sudo dnf install autoconf automake bluez-libs-devel cmake gcc-c++ git \
qt5-qtlocation-devel qt5-qtscript-devel qt5-qtsvg-devel \
qt5-qttools-devel qt5-qtwebkit-devel redhat-rpm-config \
bluez-libs-devel libgit2-devel libzip-devel libmtp-devel
```
Package names are sadly different on OpenSUSE
```
sudo zypper install git gcc-c++ make autoconf automake libtool cmake libzip-devel \
libxml2-devel libxslt-devel sqlite3-devel libusb-1_0-devel \
libqt5-linguist-devel libqt5-qttools-devel libQt5WebKitWidgets-devel \
@ -170,11 +158,9 @@ sudo zypper install git gcc-c++ make autoconf automake libtool cmake libzip-deve
libqt5-qtscript-devel libqt5-qtdeclarative-devel \
libqt5-qtconnectivity-devel libqt5-qtlocation-devel libcurl-devel \
bluez-devel libgit2-devel libmtp-devel
```
On Debian Bookworm this seems to work
```
sudo apt install \
autoconf automake cmake g++ git libbluetooth-dev libcrypto++-dev \
libcurl4-openssl-dev libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
@ -184,21 +170,17 @@ sudo apt install \
qt5-qmake qtchooser qtconnectivity5-dev qtdeclarative5-dev \
qtdeclarative5-private-dev qtlocation5-dev qtpositioning5-dev \
qtscript5-dev qttools5-dev qttools5-dev-tools libmtp-dev
```
In order to build and run mobile-on-desktop, you also need
```
sudo apt install \
qtquickcontrols2-5-dev qml-module-qtquick-window2 qml-module-qtquick-dialogs \
qml-module-qtquick-layouts qml-module-qtquick-controls2 qml-module-qtquick-templates2 \
qml-module-qtgraphicaleffects qml-module-qtqml-models2 qml-module-qtquick-controls
```
Package names for Ubuntu 21.04
```
sudo apt install \
autoconf automake cmake g++ git libbluetooth-dev libcrypto++-dev \
libcurl4-gnutls-dev libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
@ -208,21 +190,17 @@ sudo apt install \
qt5-qmake qtchooser qtconnectivity5-dev qtdeclarative5-dev \
qtdeclarative5-private-dev qtlocation5-dev qtpositioning5-dev \
qtscript5-dev qttools5-dev qttools5-dev-tools libmtp-dev
```
In order to build and run mobile-on-desktop, you also need
```
sudo apt install \
qtquickcontrols2-5-dev qml-module-qtquick-window2 qml-module-qtquick-dialogs \
qml-module-qtquick-layouts qml-module-qtquick-controls2 qml-module-qtquick-templates2 \
qml-module-qtgraphicaleffects qml-module-qtqml-models2 qml-module-qtquick-controls
```
On Raspberry Pi (Raspbian Buster and Ubuntu Mate 20.04.1) this seems to work
```
sudo apt install \
autoconf automake cmake g++ git libbluetooth-dev libcrypto++-dev \
libcurl4-gnutls-dev libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
@ -232,16 +210,13 @@ sudo apt install \
qt5-qmake qtchooser qtconnectivity5-dev qtdeclarative5-dev \
qtdeclarative5-private-dev qtlocation5-dev qtpositioning5-dev \
qtscript5-dev qttools5-dev qttools5-dev-tools libmtp-dev
```
In order to build and run mobile-on-desktop, you also need
```
sudo apt install \
qtquickcontrols2-5-dev qml-module-qtquick-window2 qml-module-qtquick-dialogs \
qml-module-qtquick-layouts qml-module-qtquick-controls2 qml-module-qtquick-templates2 \
qml-module-qtgraphicaleffects qml-module-qtqml-models2 qml-module-qtquick-controls
```
Note that on Ubuntu Mate on the Raspberry Pi, you may need to configure
@ -251,7 +226,6 @@ swap space configured by default. See the dphys-swapfile package.
On Raspberry Pi OS with Desktop (64-bit) Released April 4th, 2022, this seems
to work
```
sudo apt install \
autoconf automake cmake g++ git libbluetooth-dev libcrypto++-dev \
libcurl4-gnutls-dev libgit2-dev libqt5qml5 libqt5quick5 libqt5svg5-dev \
@ -261,16 +235,15 @@ sudo apt install \
qt5-qmake qtchooser qtconnectivity5-dev qtdeclarative5-dev \
qtdeclarative5-private-dev qtlocation5-dev qtpositioning5-dev \
qtscript5-dev qttools5-dev qttools5-dev-tools libmtp-dev
```
Note that you'll need to increase the swap space as the default of 100MB
doesn't seem to be enough. 1024MB worked on a 3B+.
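As a sketch (assuming the swap file is managed by dphys-swapfile, as on stock Raspberry Pi OS), the size can be raised to 1024MB like this:
```
sudo sed -i 's/^CONF_SWAPSIZE=.*/CONF_SWAPSIZE=1024/' /etc/dphys-swapfile
sudo systemctl restart dphys-swapfile
```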
If maps aren't working, copy the googlemaps plugin
from `<build_dir>/subsurface/googlemaps/build/libqtgeoservices_googlemaps.so`
to `/usr/lib/aarch64-linux-gnu/qt5/plugins/geoservices/`.
from <build_dir>/subsurface/googlemaps/build/libqtgeoservices_googlemaps.so
to /usr/lib/aarch64-linux-gnu/qt5/plugins/geoservices.
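That copy step is just a plain file copy, for example:
```
sudo cp <build_dir>/subsurface/googlemaps/build/libqtgeoservices_googlemaps.so \
    /usr/lib/aarch64-linux-gnu/qt5/plugins/geoservices/
```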
If Subsurface can't seem to see your dive computer on `/dev/ttyUSB0`, even after
If Subsurface can't seem to see your dive computer on /dev/ttyUSB0, even after
adjusting your account's group settings (see note below about usermod), it
might be that the FTDI driver doesn't recognize the VendorID/ProductID of your
computer. Follow the instructions here:
@ -283,14 +256,12 @@ follow TN_101.
On PCLinuxOS you appear to need the following packages
```
su -c "apt-get install -y autoconf automake cmake gcc-c++ git libtool \
lib64bluez-devel lib64qt5bluetooth-devel lib64qt5concurrent-devel \
lib64qt5help-devel lib64qt5location-devel lib64qt5quicktest-devel \
lib64qt5quickwidgets-devel lib64qt5script-devel lib64qt5svg-devel \
lib64qt5test-devel lib64qt5webkitwidgets-devel lib64qt5xml-devel \
lib64ssh2-devel lib64usb1.0-devel lib64zip-devel qttools5 qttranslations5"
```
In order to build Subsurface, use the supplied build script. This should
work on most systems that have all the prerequisite packages installed.
@ -298,121 +269,109 @@ work on most systems that have all the prerequisite packages installed.
You should have Subsurface sources checked out in a sane place, something
like this:
```
mkdir -p ~/src
cd ~/src
git clone https://github.com/Subsurface/subsurface.git
./subsurface/scripts/build.sh # <- this step will take quite a while as it
# compiles a handful of libraries before
# building Subsurface
```
Now you can run Subsurface like this:
```
cd ~/src/subsurface/build
./subsurface
```
Note: on many Linux versions (for example on Kubuntu 15.04) the user must
belong to the `dialout` group.
belong to the dialout group.
You may need to run something like
```
sudo usermod -a -G dialout $USER
```
sudo usermod -a -G dialout username
with your correct username and log out and log in again for that to take
effect.
If you get errors like:
```
./subsurface: error while loading shared libraries: libGrantlee_Templates.so.5: cannot open shared object file: No such file or directory
```
You can run the following command:
```
sudo ldconfig ~/src/install-root/lib
```
### Building Subsurface under MacOSX
Building Subsurface under MacOSX
--------------------------------
While it is possible to build all required components completely from source,
at this point the preferred way to build Subsurface is to set up the build
infrastructure via Homebrew and then build the dependencies from source.
0. You need to have XCode installed. The first time (and possibly after updating OSX)
0) You need to have XCode installed. The first time (and possibly after updating OSX)
```
xcode-select --install
```
1. install Homebrew (see https://brew.sh) and then the required build infrastructure:
1) install Homebrew (see https://brew.sh) and then the required build infrastructure:
```
brew install autoconf automake libtool pkg-config gettext
```
2. install Qt
2) install Qt
download the macOS installer from https://download.qt.io/official_releases/online_installers
and use it to install the desired Qt version. At this point the latest Qt5 version is still
preferred over Qt6.
If you plan to deploy your build to an Apple Silicon Mac, you may have better results with
Bluetooth connections if you install Qt5.15.13. If Qt5.15.13 is not available via the
installer, you can download from https://download.qt.io/official_releases/qt/5.15/5.15.13
and build using the usual configure, make, and make install.
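A rough sketch of that from-source build (the exact configure flags depend on your setup; this is not a complete recipe):
```
cd <unpacked Qt 5.15.13 source tree>
./configure -prefix $HOME/Qt/5.15.13 -opensource -confirm-license -nomake examples -nomake tests
make -j$(sysctl -n hw.ncpu)
make install
```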
3) now build Subsurface
3. now build Subsurface
```
cd ~/src; bash subsurface/scripts/build.sh -build-deps
```
if you are building against Qt6 (still experimental) you can create a universal binary with
```
cd ~/src; bash subsurface/scripts/build.sh -build-with-qt6 -build-deps -fat-build
```
After the above is done, Subsurface.app will be available in the
subsurface/build directory. You can run Subsurface with the command
A. `open subsurface/build/Subsurface.app`
A) open subsurface/build/Subsurface.app
this will however not show diagnostic output
B. `subsurface/build/Subsurface.app/Contents/MacOS/Subsurface`
the [Tab] key is your friend :-)
B) subsurface/build/Subsurface.app/Contents/MacOS/Subsurface
the TAB key is your friend :-)
Debugging can be done with either Xcode or QtCreator.
To install the app for all users, move subsurface/build/Subsurface.app to /Applications.
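For example (depending on permissions this may need sudo):
```
sudo mv subsurface/build/Subsurface.app /Applications/
```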
### Cross-building Subsurface on MacOSX for iOS
Cross-building Subsurface on MacOSX for iOS
-------------------------------------------
0. build SubSurface under MacOSX and iOS
1) build SubSurface under MacOSX and iOS
1. `cd <repo>/..; bash <repo>/scripts/build.sh -build-deps -both`
1.1) cd <repo>/..; bash <repo>/scripts/build.sh -build-deps -both
note: this is mainly done to ensure all external dependencies are downloaded and set
to the correct versions
2. follow [these instructions](packaging/ios/README.md)
2) continue as described in subsurface/packaging/ios
### Cross-building Subsurface on Linux for Windows
Cross-building Subsurface on Linux for Windows
----------------------------------------------
Subsurface for Windows is built on Linux using the [MXE (M cross environment)](https://github.com/mxe/mxe). The easiest way to do this is to use a Docker container with a pre-built MXE for Subsurface by following [these instructions](packaging/windows/README.md).
Subsurface builds nicely with MinGW - the official builds are done as
cross builds under Linux (currently on Ubuntu 20.04). A shell script to do
that (plus the .nsi file to create the installer with makensis) are
included in the packaging/windows directory.
Please read through the explanations and instructions in
packaging/windows/README.md, packaging/windows/create-win-installer.sh, and
packaging/windows/mxe-based-build.sh if you want to build the Windows version
on your Linux system.
### Building Subsurface on Windows
Building Subsurface on Windows
------------------------------
This is NOT RECOMMENDED. To the best of our knowledge there is one single
person who regularly does this. The Subsurface team does not provide support
@ -422,9 +381,8 @@ The lack of a working package management system for Windows makes it
really painful to build Subsurface natively under Windows,
so we don't support that at all.
But if you want to build Subsurface on a Windows system, the docker based [cross-build for Windows](packaging/windows/README.md) works just fine in WSL2 on Windows.
Cross-building Subsurface on Linux for Android
----------------------------------------------
### Cross-building Subsurface on Linux for Android
Follow [these instructions](packaging/android/README.md).
Follow the instructions in packaging/android/README
View File
@ -1,17 +1,20 @@
# Subsurface
[![Windows](https://github.com/subsurface/subsurface/actions/workflows/windows.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/windows.yml)
[![Mac](https://github.com/subsurface/subsurface/actions/workflows/mac.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/mac.yml)
[![iOS](https://github.com/subsurface/subsurface/actions/workflows/ios.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/ios.yml)
[![Android](https://github.com/subsurface/subsurface/actions/workflows/android.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/android.yml)
![Build Status](https://github.com/subsurface/subsurface/workflows/Windows/badge.svg)
![Build Status](https://github.com/subsurface/subsurface/workflows/Mac/badge.svg)
![Build Status](https://github.com/subsurface/subsurface/workflows/iOS/badge.svg)
![Build Status](https://github.com/subsurface/subsurface/workflows/Android/badge.svg)
[![Snap](https://github.com/subsurface/subsurface/actions/workflows/linux-snap.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/linux-snap.yml)
[![Ubuntu 16.04 / Qt 5.15-- for AppImage](https://github.com/subsurface/subsurface/actions/workflows/linux-ubuntu-16.04-5.12-appimage.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/linux-ubuntu-16.04-5.12-appimage.yml)
[![Ubuntu 24.04 / Qt 5.15--](https://github.com/subsurface/subsurface/actions/workflows/linux-ubuntu-24.04-5.15.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/linux-ubuntu-24.04-5.15.yml)
[![Fedora 35 / Qt 6--](https://github.com/subsurface/subsurface/actions/workflows/linux-fedora-35-qt6.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/linux-fedora-35-qt6.yml)
[![Debian trixie / Qt 5.15--](https://github.com/subsurface/subsurface/actions/workflows/linux-debian-trixie-5.15.yml/badge.svg)](https://github.com/subsurface/subsurface/actions/workflows/linux-debian-trixie-5.15.yml)
![Build Status](https://github.com/subsurface/subsurface/workflows/Linux%20Snap/badge.svg)
![Build Status](https://github.com/subsurface/subsurface/workflows/Ubuntu%2014.04%20/%20Qt%205.12%20for%20AppImage--/badge.svg)
![Build Status](https://github.com/subsurface/subsurface/workflows/Ubuntu%2018.04%20/%20Qt%205.9--/badge.svg)
![Build Status](https://github.com/subsurface/subsurface/workflows/Ubuntu%2020.04%20/%20Qt%205.12--/badge.svg)
![Build Status](https://github.com/subsurface/subsurface/workflows/Ubuntu%2022.04%20/%20Qt%205.15--/badge.svg)
[![Coverity Scan Results](https://scan.coverity.com/projects/14405/badge.svg)](https://scan.coverity.com/projects/subsurface-divelog-subsurface)
This is the README file for Subsurface 5.0.10
Please check the `ReleaseNotes.txt` for details about new features and
changes since Subsurface 5.0.9 (and earlier versions).
Subsurface can be found at http://subsurface-divelog.org
@ -21,9 +24,16 @@ Report bugs and issues at https://github.com/Subsurface/subsurface/issues
License: GPLv2
We are releasing 'nightly' builds of Subsurface that are built from the latest version of the code. Versions of this build for Windows, macOS, Android (requiring sideloading), and a Linux AppImage can be downloaded from the [Latest Dev Release](https://www.subsurface-divelog.org/latest-release/) page on [our website](https://www.subsurface-divelog.org/). Alternatively, they can be downloaded [directly from GitHub](https://github.com/subsurface/nightly-builds/releases). Additionally, those same versions are
We frequently make new test versions of Subsurface available at
http://subsurface-divelog.org/downloads/test/ and there you can always get
the latest builds for Mac, Windows, Linux AppImage and Android (with some
caveats about installability). Additionally, those same versions are
posted to the Subsurface-daily repos on Ubuntu Launchpad, Fedora COPR, and
OpenSUSE OBS, and released to [Snapcraft](https://snapcraft.io/subsurface) into the 'edge' channel of subsurface.
OpenSUSE OBS.
These tend to contain the latest bug fixes and features, but also
occasionally the latest bugs and issues. Please understand when using them
that these are primarily intended for testing.
You can get the sources to the latest development version from the git
repository:
@ -35,11 +45,17 @@ git clone https://github.com/Subsurface/subsurface.git
You can also fork the repository and browse the sources at the same site,
simply using https://github.com/Subsurface/subsurface
Additionally, artifacts for Windows, macOS, Android, Linux AppImage, and iOS (simulator build) are generated for all open pull requests and linked in pull request comments. Use these if you want to test the changes in a specific pull request and provide feedback before it has been merged.
If you want the latest release (instead of the bleeding edge
development version) you can either get this via git or the release tar
ball. After cloning run the following command:
If you want a more stable version that has had a bit more testing, you can get it from the [Current Release](https://www.subsurface-divelog.org/current-release/) page on [our website](https://www.subsurface-divelog.org/).
```
git checkout v5.0.10 (or whatever the last release is)
```
Detailed build instructions can be found in the [INSTALL.md](/INSTALL.md) file.
or download a tarball from http://subsurface-divelog.org/downloads/Subsurface-5.0.10.tgz
Detailed build instructions can be found in the INSTALL file.
## System Requirements
View File
@ -25,7 +25,7 @@ SOURCES += subsurface-mobile-main.cpp \
core/devicedetails.cpp \
core/downloadfromdcthread.cpp \
core/qtserialbluetooth.cpp \
core/plannernotes.cpp \
core/plannernotes.c \
core/uemis-downloader.cpp \
core/qthelper.cpp \
core/checkcloudconnection.cpp \
@ -33,7 +33,7 @@ SOURCES += subsurface-mobile-main.cpp \
core/configuredivecomputer.cpp \
core/divelogexportlogic.cpp \
core/divesitehelpers.cpp \
core/errorhelper.cpp \
core/errorhelper.c \
core/exif.cpp \
core/format.cpp \
core/gettextfromc.cpp \
@ -44,9 +44,8 @@ SOURCES += subsurface-mobile-main.cpp \
core/file.cpp \
core/fulltext.cpp \
core/subsurfacestartup.cpp \
core/subsurface-string.cpp \
core/pref.c \
core/profile.cpp \
core/profile.c \
core/device.cpp \
core/dive.cpp \
core/divecomputer.c \
@ -74,18 +73,18 @@ SOURCES += subsurface-mobile-main.cpp \
core/import-cobalt.cpp \
core/import-divinglog.cpp \
core/import-csv.cpp \
core/save-html.cpp \
core/save-html.c \
core/statistics.c \
core/worldmap-save.cpp \
core/worldmap-save.c \
core/libdivecomputer.cpp \
core/version.c \
core/save-git.cpp \
core/datatrak.cpp \
core/ostctools.c \
core/planner.cpp \
core/planner.c \
core/save-xml.cpp \
core/cochran.cpp \
core/deco.cpp \
core/deco.c \
core/divesite.c \
core/equipment.c \
core/gas.c \
@ -94,7 +93,7 @@ SOURCES += subsurface-mobile-main.cpp \
core/sha1.c \
core/string-format.cpp \
core/strtod.c \
core/tag.cpp \
core/tag.c \
core/taxonomy.c \
core/time.cpp \
core/trip.c \
@ -245,6 +244,7 @@ HEADERS += \
core/sample.h \
core/selection.h \
core/sha1.h \
core/strndup.h \
core/string-format.h \
core/subsurfacestartup.h \
core/subsurfacesysinfo.h \
View File
@ -181,7 +181,7 @@ void export_TeX(const char *filename, bool selected_only, bool plain, ExportCall
site ? put_format(&buf, "\\def\\%sgpslon{%f}\n", ssrf, site->location.lon.udeg / 1000000.0) : put_format(&buf, "\\def\\gpslon{}\n");
put_format(&buf, "\\def\\%scomputer{%s}\n", ssrf, dive->dc.model);
put_format(&buf, "\\def\\%scountry{%s}\n", ssrf, country ?: "");
put_format(&buf, "\\def\\%stime{%u:%02u}\n", ssrf, FRACTION_TUPLE(dive->duration.seconds, 60));
put_format(&buf, "\\def\\%stime{%u:%02u}\n", ssrf, FRACTION(dive->duration.seconds, 60));
put_format(&buf, "\n%% Dive Profile Details:\n");
dive->maxtemp.mkelvin ? put_format(&buf, "\\def\\%smaxtemp{%.1f\\%stemperatureunit}\n", ssrf, get_temp_units(dive->maxtemp.mkelvin, &unit), ssrf) : put_format(&buf, "\\def\\%smaxtemp{}\n", ssrf);
@ -191,8 +191,14 @@ void export_TeX(const char *filename, bool selected_only, bool plain, ExportCall
dive->maxdepth.mm ? put_format(&buf, "\\def\\%smaximumdepth{%.1f\\%sdepthunit}\n", ssrf, get_depth_units(dive->maxdepth.mm, NULL, &unit), ssrf) : put_format(&buf, "\\def\\%smaximumdepth{}\n", ssrf);
dive->meandepth.mm ? put_format(&buf, "\\def\\%smeandepth{%.1f\\%sdepthunit}\n", ssrf, get_depth_units(dive->meandepth.mm, NULL, &unit), ssrf) : put_format(&buf, "\\def\\%smeandepth{}\n", ssrf);
std::string tags = taglist_get_tagstring(dive->tag_list);
put_format(&buf, "\\def\\%stype{%s}\n", ssrf, tags.c_str());
struct tag_entry *tag = dive->tag_list;
QString tags;
if (tag) {
tags = tag->tag->name;
while ((tag = tag->next))
tags += QString(", ") + QString(tag->tag->name);
}
put_format(&buf, "\\def\\%stype{%s}\n", ssrf, qPrintable(tags));
put_format(&buf, "\\def\\%sviz{%s}\n", ssrf, qPrintable(viz));
put_format(&buf, "\\def\\%srating{%s}\n", ssrf, qPrintable(rating));
put_format(&buf, "\\def\\%splot{\\includegraphics[width=9cm,height=4cm]{profile%d}}\n", ssrf, dive->number);
View File
@ -1,20 +1,20 @@
execute_process(
COMMAND bash ${CMAKE_TOP_SRC_DIR}/scripts/get-version.sh 4
COMMAND bash ${CMAKE_TOP_SRC_DIR}/scripts/get-version 4
WORKING_DIRECTORY ${CMAKE_TOP_SRC_DIR}
OUTPUT_VARIABLE CANONICAL_VERSION_STRING_4
OUTPUT_STRIP_TRAILING_WHITESPACE
)
execute_process(
COMMAND bash ${CMAKE_TOP_SRC_DIR}/scripts/get-version.sh 3
COMMAND bash ${CMAKE_TOP_SRC_DIR}/scripts/get-version 3
WORKING_DIRECTORY ${CMAKE_TOP_SRC_DIR}
OUTPUT_VARIABLE CANONICAL_VERSION_STRING_3
OUTPUT_STRIP_TRAILING_WHITESPACE
)
execute_process(
COMMAND bash ${CMAKE_TOP_SRC_DIR}/scripts/get-version.sh
COMMAND bash ${CMAKE_TOP_SRC_DIR}/scripts/get-version
WORKING_DIRECTORY ${CMAKE_TOP_SRC_DIR}
OUTPUT_VARIABLE CANONICAL_VERSION_STRING
OUTPUT_STRIP_TRAILING_WHITESPACE
View File
@ -100,7 +100,6 @@ enum class EditProfileType {
ADD,
REMOVE,
MOVE,
EDIT,
};
void replanDive(dive *d); // dive computer(s) and cylinder(s) of first argument will be consumed!
void editProfile(const dive *d, int dcNr, EditProfileType type, int count);
View File
@ -521,11 +521,6 @@ ImportDives::ImportDives(struct divelog *log, int flags, const QString &source)
continue;
filterPresetsToAdd.emplace_back(preset.name, preset.data);
}
free(dives_to_add.dives);
free(dives_to_remove.dives);
free(trips_to_add.trips);
free(sites_to_add.dive_sites);
}
bool ImportDives::workToBeDone()
View File
@ -566,7 +566,7 @@ QStringList EditTags::data(struct dive *d) const
{
QStringList res;
for (const struct tag_entry *tag = d->tag_list; tag; tag = tag->next)
res.push_back(QString::fromStdString(tag->tag->name));
res.push_back(tag->tag->name);
return res;
}
@ -879,7 +879,6 @@ QString editProfileTypeToString(EditProfileType type, int count)
case EditProfileType::ADD: return Command::Base::tr("Add stop");
case EditProfileType::REMOVE: return Command::Base::tr("Remove %n stop(s)", "", count);
case EditProfileType::MOVE: return Command::Base::tr("Move %n stop(s)", "", count);
case EditProfileType::EDIT: return Command::Base::tr("Edit stop");
}
}
@ -905,7 +904,7 @@ EditProfile::EditProfile(const dive *source, int dcNr, EditProfileType type, int
copy_samples(sdc, &dc);
copy_events(sdc, &dc);
setText(editProfileTypeToString(type, count) + " " + diveNumberOrDate(d));
setText(editProfileTypeToString(type, count) + diveNumberOrDate(d));
}
EditProfile::~EditProfile()
@ -926,7 +925,6 @@ void EditProfile::undo()
std::swap(sdc->samples, dc.samples);
std::swap(sdc->alloc_samples, dc.alloc_samples);
std::swap(sdc->sample, dc.sample);
std::swap(sdc->events, dc.events);
std::swap(sdc->maxdepth, dc.maxdepth);
std::swap(d->maxdepth, maxdepth);
std::swap(d->meandepth, meandepth);
@ -1127,7 +1125,7 @@ AddCylinder::AddCylinder(bool currentDiveOnly) :
setText(Command::Base::tr("Add cylinder"));
else
setText(Command::Base::tr("Add cylinder (%n dive(s))", "", dives.size()));
cyl = create_new_manual_cylinder(dives[0]);
cyl = create_new_cylinder(dives[0]);
indexes.reserve(dives.size());
}
@ -1319,7 +1317,8 @@ EditCylinder::EditCylinder(int index, cylinder_t cylIn, EditCylinderType typeIn,
void EditCylinder::redo()
{
for (size_t i = 0; i < dives.size(); ++i) {
set_tank_info_data(&tank_info_table, cyl[i].type.description, cyl[i].type.size, cyl[i].type.workingpressure);
set_tank_info_size(&tank_info_table, cyl[i].type.description, cyl[i].type.size);
set_tank_info_workingpressure(&tank_info_table, cyl[i].type.description, cyl[i].type.workingpressure);
std::swap(*get_cylinder(dives[i], indexes[i]), cyl[i]);
update_cylinder_related_info(dives[i]);
emit diveListNotifier.cylinderEdited(dives[i], indexes[i]);
@ -1427,7 +1426,7 @@ EditDive::EditDive(dive *oldDiveIn, dive *newDiveIn, dive_site *createDs, dive_s
changedFields |= DiveField::CHILL;
if (!same_string(oldDive->suit, newDive->suit))
changedFields |= DiveField::SUIT;
if (taglist_get_tagstring(oldDive->tag_list) != taglist_get_tagstring(newDive->tag_list)) // This is cheating. Do we have a taglist comparison function?
if (get_taglist_string(oldDive->tag_list) != get_taglist_string(newDive->tag_list)) // This is cheating. Do we have a taglist comparison function?
changedFields |= DiveField::TAGS;
if (oldDive->dc.divemode != newDive->dc.divemode)
changedFields |= DiveField::MODE;
View File
@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "command_pictures.h"
#include "core/errorhelper.h"
#include "core/subsurface-qt/divelistnotifier.h"
#include "qt-models/divelocationmodel.h"
@ -25,7 +24,7 @@ void SetPictureOffset::redo()
{
picture *pic = dive_get_picture(d, filename);
if (!pic) {
report_info("SetPictureOffset::redo(): picture disappeared!");
fprintf(stderr, "SetPictureOffset::redo(): picture disappeared!\n");
return;
}
std::swap(pic->offset, offset);
@ -74,7 +73,7 @@ static std::vector<PictureListForAddition> removePictures(std::vector<PictureLis
for (const std::string &fn: list.filenames) {
int idx = get_picture_idx(&list.d->pictures, fn.c_str());
if (idx < 0) {
report_info("removePictures(): picture disappeared!");
fprintf(stderr, "removePictures(): picture disappeared!\n");
continue; // Huh? We made sure that this can't happen by filtering out non-existent pictures.
}
filenames.push_back(QString::fromStdString(fn));
@ -104,7 +103,7 @@ static std::vector<PictureListForDeletion> addPictures(std::vector<PictureListFo
for (const PictureObj &pic: list.pics) {
int idx = get_picture_idx(&list.d->pictures, pic.filename.c_str()); // This should *not* already exist!
if (idx >= 0) {
report_info("addPictures(): picture disappeared!");
fprintf(stderr, "addPictures(): picture disappeared!\n");
continue; // Huh? We made sure that this can't happen by filtering out existing pictures.
}
picsForSignal.push_back(pic);
View File
@ -53,7 +53,7 @@ set(SUBSURFACE_CORE_LIB_SRCS
connectionlistmodel.h
datatrak.cpp
datatrak.h
deco.cpp
deco.c
deco.h
device.cpp
device.h
@ -85,7 +85,7 @@ set(SUBSURFACE_CORE_LIB_SRCS
eventtype.h
equipment.c
equipment.h
errorhelper.cpp
errorhelper.c
exif.cpp
exif.h
extradata.h
@ -141,12 +141,12 @@ set(SUBSURFACE_CORE_LIB_SRCS
picture.h
pictureobj.cpp
pictureobj.h
planner.cpp
planner.c
planner.h
plannernotes.cpp
plannernotes.c
pref.h
pref.c
profile.cpp
profile.c
profile.h
qt-gui.h
qt-init.cpp
@ -156,7 +156,7 @@ set(SUBSURFACE_CORE_LIB_SRCS
sample.cpp
sample.h
save-git.cpp
save-html.cpp
save-html.c
save-html.h
save-profiledata.c
save-xml.cpp
@ -167,17 +167,17 @@ set(SUBSURFACE_CORE_LIB_SRCS
ssrf.h
statistics.c
statistics.h
strndup.h
string-format.h
string-format.cpp
strtod.c
subsurface-float.h
subsurface-string.cpp
subsurface-string.h
subsurfacestartup.cpp
subsurfacestartup.h
subsurfacesysinfo.cpp
subsurfacesysinfo.h
tag.cpp
tag.c
tag.h
taxonomy.c
taxonomy.h
@ -203,7 +203,7 @@ set(SUBSURFACE_CORE_LIB_SRCS
windowtitleupdate.cpp
windowtitleupdate.h
worldmap-options.h
worldmap-save.cpp
worldmap-save.c
worldmap-save.h
xmlparams.cpp
xmlparams.h
View File
@ -2,9 +2,9 @@
#include "btdiscovery.h"
#include "downloadfromdcthread.h"
#include "libdivecomputer.h"
#include "errorhelper.h"
#include "core/libdivecomputer.h"
#include <QTimer>
#include <QDebug>
#include <QLoggingCategory>
#include <QRegularExpression>
#include <QElapsedTimer>
@ -177,7 +177,7 @@ BTDiscovery::BTDiscovery(QObject*) : m_btValid(false),
discoveryAgent(nullptr)
{
if (m_instance) {
report_info("trying to create an additional BTDiscovery object");
qDebug() << "trying to create an additional BTDiscovery object";
return;
}
m_instance = this;
@ -195,11 +195,11 @@ void BTDiscovery::showNonDiveComputers(bool show)
void BTDiscovery::BTDiscoveryReDiscover()
{
#if !defined(Q_OS_IOS)
report_info("BTDiscoveryReDiscover: localBtDevice.isValid() %d", localBtDevice.isValid());
qDebug() << "BTDiscoveryReDiscover: localBtDevice.isValid()" << localBtDevice.isValid();
if (localBtDevice.isValid() &&
localBtDevice.hostMode() != QBluetoothLocalDevice::HostPoweredOff) {
btPairedDevices.clear();
report_info("BTDiscoveryReDiscover: localDevice %s is powered on, starting discovery", qPrintable(localBtDevice.name()));
qDebug() << "BTDiscoveryReDiscover: localDevice " + localBtDevice.name() + " is powered on, starting discovery";
#else
// for iOS we can't use the localBtDevice as iOS is BLE only
// we need to find some other way to test if Bluetooth is enabled, though
@ -220,13 +220,13 @@ void BTDiscovery::BTDiscoveryReDiscover()
connect(discoveryAgent, QOverload<QBluetoothDeviceDiscoveryAgent::Error>::of(&QBluetoothDeviceDiscoveryAgent::error),
#endif
[this](QBluetoothDeviceDiscoveryAgent::Error error){
report_info("device discovery received error %s", qPrintable(discoveryAgent->errorString()));
qDebug() << "device discovery received error" << discoveryAgent->errorString();
});
report_info("discovery methods %d", (int)QBluetoothDeviceDiscoveryAgent::supportedDiscoveryMethods());
qDebug() << "discovery methods" << (int)QBluetoothDeviceDiscoveryAgent::supportedDiscoveryMethods();
}
#if defined(Q_OS_ANDROID)
// on Android, we cannot scan for classic devices - we just get the paired ones
report_info("starting BLE discovery");
qDebug() << "starting BLE discovery";
discoveryAgent->start(QBluetoothDeviceDiscoveryAgent::LowEnergyMethod);
getBluetoothDevices();
// and add the paired devices to the internal data
@ -235,10 +235,10 @@ void BTDiscovery::BTDiscoveryReDiscover()
for (int i = 0; i < btPairedDevices.length(); i++)
btDeviceDiscoveredMain(btPairedDevices[i], true);
#else
report_info("starting BT/BLE discovery");
qDebug() << "starting BT/BLE discovery";
discoveryAgent->start();
for (int i = 0; i < btPairedDevices.length(); i++)
report_info("Paired = %s %s", qPrintable( btPairedDevices[i].name), qPrintable(btPairedDevices[i].address));
qDebug() << "Paired =" << btPairedDevices[i].name << btPairedDevices[i].address;
#endif
#if defined(Q_OS_IOS) || (defined(Q_OS_LINUX) && !defined(Q_OS_ANDROID))
@ -248,7 +248,7 @@ void BTDiscovery::BTDiscoveryReDiscover()
timer.start(3000);
#endif
} else {
report_info("localBtDevice isn't valid or not connectable");
qDebug() << "localBtDevice isn't valid or not connectable";
m_btValid = false;
}
}
@ -291,10 +291,10 @@ QString markBLEAddress(const QBluetoothDeviceInfo *device)
void BTDiscovery::btDeviceDiscoveryFinished()
{
report_info("BT/BLE finished discovery");
qDebug() << "BT/BLE finished discovery";
QList<QBluetoothDeviceInfo> devList = discoveryAgent->discoveredDevices();
for (QBluetoothDeviceInfo device: devList) {
report_info("%s %s", qPrintable(device.name()), qPrintable(device.address().toString()));
qDebug() << device.name() << device.address().toString();
}
}
@ -308,7 +308,7 @@ void BTDiscovery::btDeviceDiscovered(const QBluetoothDeviceInfo &device)
const auto serviceUuids = device.serviceUuids();
for (QBluetoothUuid id: serviceUuids) {
addBtUuid(id);
report_info("%s", qPrintable(id.toByteArray()));
qDebug() << id.toByteArray();
}
#if defined(Q_OS_IOS) || defined(Q_OS_MACOS) || defined(Q_OS_WIN)
@ -337,7 +337,7 @@ void BTDiscovery::btDeviceDiscoveredMain(const btPairedDevice &device, bool from
msg = QString("%1 device: '%2' [%3]: ").arg(fromPaired ? "Paired" : "Discovered new").arg(newDevice).arg(device.address);
if (newDC) {
QString vendor = dc_descriptor_get_vendor(newDC);
report_info("%s this could be a %s", qPrintable(msg), qPrintable(vendor));
qDebug() << msg << "this could be a " + vendor;
btVP.btpdi = device;
btVP.dcDescriptor = newDC;
btVP.vendorIdx = vendorList.indexOf(vendor);
@ -352,7 +352,7 @@ void BTDiscovery::btDeviceDiscoveredMain(const btPairedDevice &device, bool from
newDevice += " ";
connectionListModel.addAddress(newDevice + device.address);
}
report_info("%s not recognized as dive computer", qPrintable(msg));
qDebug() << msg << "not recognized as dive computer";
}
QList<BTDiscovery::btVendorProduct> BTDiscovery::getBtDcs()
@ -407,12 +407,12 @@ void BTDiscovery::getBluetoothDevices()
result.address = dev.callObjectMethod("getAddress","()Ljava/lang/String;").toString();
result.name = dev.callObjectMethod("getName", "()Ljava/lang/String;").toString();
if (btType & 1) { // DEVICE_TYPE_CLASSIC
report_info("paired BT classic device type %d with address %s", btType, qPrintable(result.address));
qDebug() << "paired BT classic device type" << btType << "with address" << result.address;
btPairedDevices.append(result);
}
if (btType & 2) { // DEVICE_TYPE_LE
result.address = QString("LE:%1").arg(result.address);
report_info("paired BLE device type %d with address %s", btType, qPrintable(result.address));
qDebug() << "paired BLE device type" << btType << "with address" << result.address;
btPairedDevices.append(result);
}
}
@ -451,7 +451,7 @@ void BTDiscovery::discoverAddress(QString address)
btAddress = extractBluetoothAddress(address);
if (!btDeviceInfo.keys().contains(address) && !discoveryAgent->isActive()) {
report_info("restarting discovery agent");
qDebug() << "restarting discovery agent";
discoveryAgent->start();
}
}
@ -460,7 +460,7 @@ void BTDiscovery::stopAgent()
{
if (!discoveryAgent)
return;
report_info("---> stopping the discovery agent");
qDebug() << "---> stopping the discovery agent";
discoveryAgent->stop();
}
@ -491,7 +491,7 @@ QString extractBluetoothNameAddress(const QString &address, QString &name)
name = m.captured(1).trimmed();
return extractedAddress;
}
report_info("can't parse address %s", qPrintable(address));
qDebug() << "can't parse address" << address;
return QString();
}
@ -507,7 +507,7 @@ QBluetoothDeviceInfo getBtDeviceInfo(const QString &devaddr)
return btDeviceInfo[devaddr];
}
if(!btDeviceInfo.keys().contains(devaddr)) {
report_info("still looking scan is still running, we should just wait for a few moments");
qDebug() << "still looking scan is still running, we should just wait for a few moments";
// wait for a maximum of 30 more seconds
// yes, that seems crazy, but on my Mac I see this take more than 20 seconds
QElapsedTimer timer;
@ -521,7 +521,7 @@ QBluetoothDeviceInfo getBtDeviceInfo(const QString &devaddr)
QThread::msleep(100);
} while (timer.elapsed() < 30000);
}
report_info("notify user that we can't find %s", qPrintable(devaddr));
qDebug() << "notify user that we can't find" << devaddr;
return QBluetoothDeviceInfo();
}
#endif // BT_SUPPORT
View File
@ -10,7 +10,6 @@
#include "qthelper.h"
#include "git-access.h"
#include "errorhelper.h"
#include "core/format.h"
#include "core/subsurface-string.h"
#include "core/membuffer.h"
#include "core/settings/qPrefCloudStorage.h"
@ -35,7 +34,7 @@ CheckCloudConnection::CheckCloudConnection(QObject *parent) :
bool CheckCloudConnection::checkServer()
{
if (verbose)
report_info("Checking cloud connection...");
fprintf(stderr, "Checking cloud connection...\n");
QEventLoop loop;
QNetworkAccessManager *mgr = new QNetworkAccessManager();
@ -73,10 +72,10 @@ bool CheckCloudConnection::checkServer()
}
}
if (verbose)
report_info("connection test to cloud server %s failed %d %s %d %s", prefs.cloud_base_url,
static_cast<int>(reply->error()), qPrintable(reply->errorString()),
reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt(),
qPrintable(reply->readAll()));
qDebug() << "connection test to cloud server" << prefs.cloud_base_url << "failed" <<
reply->error() << reply->errorString() <<
reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt() <<
reply->readAll();
} while (nextServer());
// if none of the servers was reachable, update the user and switch to git_local_only
git_storage_update_progress(qPrintable(tr("Cloud connection failed")));
@ -90,9 +89,9 @@ bool CheckCloudConnection::checkServer()
void CheckCloudConnection::sslErrors(const QList<QSslError> &errorList)
{
report_info("Received error response trying to set up https connection with cloud storage backend:");
qDebug() << "Received error response trying to set up https connection with cloud storage backend:";
for (QSslError err: errorList)
report_info("%s", qPrintable(err.errorString()));
qDebug() << err.errorString();
}
bool CheckCloudConnection::nextServer()
@ -120,12 +119,12 @@ bool CheckCloudConnection::nextServer()
strcpy(baseurl, "https://");
strncat(baseurl, server, s);
strcat(baseurl, "/");
report_info("failed to connect to %s next server to try: %s", prefs.cloud_base_url, baseurl);
qDebug() << "failed to connect to" << prefs.cloud_base_url << "next server to try: " << baseurl;
prefs.cloud_base_url = baseurl;
git_storage_update_progress(qPrintable(tr("Trying different cloud server...")));
return true;
}
report_info("failed to connect to any of the Subsurface cloud servers, giving up");
qDebug() << "failed to connect to any of the Subsurface cloud servers, giving up";
return false;
}
@ -144,7 +143,7 @@ void CheckCloudConnection::gotIP(QNetworkReply *reply)
if (reply->error() != QNetworkReply::NoError) {
// whatever, just use the default host
if (verbose)
report_info("%s got error reply from ip webservice - not changing cloud host", __func__);
qDebug() << __FUNCTION__ << "got error reply from ip webservice - not changing cloud host";
return;
}
QString addressString = reply->readAll();
@ -154,11 +153,11 @@ void CheckCloudConnection::gotIP(QNetworkReply *reply)
if (addr.isNull()) {
// this isn't an address, don't try to update the cloud host
if (verbose)
report_info("%s returned address doesn't appear to be valid (%s) - not changing cloud host", __func__, qPrintable(addressString));
qDebug() << __FUNCTION__ << "returned address doesn't appear to be valid (" << addressString << ") - not changing cloud host";
return;
}
if (verbose)
report_info("IP used for cloud server access %s", qPrintable(addressString));
qDebug() << "IP used for cloud server access" << addressString;
// now figure out which continent we are on
QNetworkRequest request(QString(GET_CONTINENT_API).arg(addressString));
request.setRawHeader("Accept", "text/plain");
@ -173,7 +172,7 @@ void CheckCloudConnection::gotContinent(QNetworkReply *reply)
if (reply->error() != QNetworkReply::NoError) {
// whatever, just use the default host
if (verbose)
report_info("%s got error reply from ip location webservice - not changing cloud host", __func__);
qDebug() << __FUNCTION__ << "got error reply from ip location webservice - not changing cloud host";
return;
}
QString continentString = reply->readAll();
@ -194,7 +193,7 @@ void CheckCloudConnection::gotContinent(QNetworkReply *reply)
base_url = "https://" CLOUD_HOST_EU "/";
if (!same_string(base_url, prefs.cloud_base_url)) {
if (verbose)
report_info("remember cloud server %s based on IP location in %s", base_url, qPrintable(continentString));
qDebug() << "remember cloud server" << base_url << "based on IP location in " << continentString;
qPrefCloudStorage::instance()->store_cloud_base_url(base_url);
}
}
@ -203,18 +202,17 @@ void CheckCloudConnection::gotContinent(QNetworkReply *reply)
extern "C" bool canReachCloudServer(struct git_info *info)
{
if (verbose)
qWarning() << "Cloud storage: checking connection to cloud server" << info->url.c_str();
qWarning() << "Cloud storage: checking connection to cloud server" << info->url;
bool connection = CheckCloudConnection().checkServer();
if (info->url.find(prefs.cloud_base_url) == std::string::npos) {
if (strstr(info->url, prefs.cloud_base_url) == nullptr) {
// we switched the cloud URL - likely because we couldn't reach the server passed in
// the strstr with the offset is designed so we match the right component in the name;
// the cloud_base_url ends with a '/', so we need the text starting at "git/..."
size_t pos = info->url.find("org/git/");
if (pos != std::string::npos) {
info->url = format_string_std("%s%s", prefs.cloud_base_url, info->url.c_str() + pos + 4);
char *newremote = format_string("%s%s", prefs.cloud_base_url, strstr(info->url, "org/git/") + 4);
if (verbose)
report_info("updating remote to: %s", info->url.c_str());
}
qDebug() << "updating remote to: " << newremote;
free((void*)info->url);
info->url = newremote;
}
return connection;
}
View File
@ -74,30 +74,31 @@ QNetworkReply* CloudStorageAuthenticate::deleteAccount(const QString& email, con
void CloudStorageAuthenticate::deleteFinished()
{
std::string cloudAuthReply = reply->readAll().toStdString();
report_info("Completed connection with cloud storage backend, response %s", cloudAuthReply.c_str());
QString cloudAuthReply(reply->readAll());
qDebug() << "Completed connection with cloud storage backend, response" << cloudAuthReply;
emit finishedDelete();
}
void CloudStorageAuthenticate::uploadFinished()
{
static std::string myLastError;
static QString myLastError;
std::string cloudAuthReply = reply->readAll().toStdString();
report_info("Completed connection with cloud storage backend, response %s", cloudAuthReply.c_str());
QString cloudAuthReply(reply->readAll());
qDebug() << "Completed connection with cloud storage backend, response" << cloudAuthReply;
if (cloudAuthReply == "[VERIFIED]" || cloudAuthReply == "[OK]") {
if (cloudAuthReply == QLatin1String("[VERIFIED]") || cloudAuthReply == QLatin1String("[OK]")) {
qPrefCloudStorage::set_cloud_verification_status(qPrefCloudStorage::CS_VERIFIED);
/* TODO: Move this to a correct place
NotificationWidget *nw = MainWindow::instance()->getNotificationWidget();
if (nw->getNotificationText().toStdString() == myLastError)
if (nw->getNotificationText() == myLastError)
nw->hideNotification();
*/
myLastError.clear();
} else if (cloudAuthReply == "[VERIFY]" || cloudAuthReply == "Invalid PIN") {
} else if (cloudAuthReply == QLatin1String("[VERIFY]") ||
cloudAuthReply == QLatin1String("Invalid PIN")) {
qPrefCloudStorage::set_cloud_verification_status(qPrefCloudStorage::CS_NEED_TO_VERIFY);
report_error("%s", qPrintable(tr("Cloud account verification required, enter PIN in preferences")));
} else if (cloudAuthReply == "[PASSWDCHANGED]") {
} else if (cloudAuthReply == QLatin1String("[PASSWDCHANGED]")) {
qPrefCloudStorage::set_cloud_storage_password(cloudNewPassword);
cloudNewPassword.clear();
emit passwordChangeSuccessful();
@ -105,28 +106,28 @@ void CloudStorageAuthenticate::uploadFinished()
} else {
qPrefCloudStorage::set_cloud_verification_status(qPrefCloudStorage::CS_INCORRECT_USER_PASSWD);
myLastError = cloudAuthReply;
report_error("%s", cloudAuthReply.c_str());
report_error("%s", qPrintable(cloudAuthReply));
}
emit finishedAuthenticate();
}
void CloudStorageAuthenticate::uploadError(QNetworkReply::NetworkError)
{
report_info("Received error response from cloud storage backend: %s", qPrintable(reply->errorString()));
qDebug() << "Received error response from cloud storage backend:" << reply->errorString();
}
void CloudStorageAuthenticate::sslErrors(const QList<QSslError> &errorList)
{
if (verbose) {
report_info("Received error response trying to set up https connection with cloud storage backend:");
qDebug() << "Received error response trying to set up https connection with cloud storage backend:";
for (QSslError err: errorList) {
report_info("%s", qPrintable(err.errorString()));
qDebug() << err.errorString();
}
}
QSslConfiguration conf = reply->sslConfiguration();
QSslCertificate cert = conf.peerCertificate();
QByteArray hexDigest = cert.digest().toHex();
report_info("got invalid SSL certificate with hex digest %s", qPrintable(hexDigest));
qDebug() << "got invalid SSL certificate with hex digest" << hexDigest;
}
QNetworkAccessManager *manager()
View File
@ -721,6 +721,7 @@ int datatrak_import(std::string &mem, std::string &wl_mem, struct divelog *log)
i++;
}
out:
taglist_cleanup(&g_tag_list);
sort_dive_table(log->dives);
return rc;
bail:
View File
@ -13,6 +13,8 @@
* set_gf() - set Buehlmann gradient factors
* set_vpmb_conservatism() - set VPM-B conservatism value
* clear_deco()
* cache_deco_state()
* restore_deco_state()
* dump_tissues()
*/
#include <stdlib.h>
@ -216,7 +218,7 @@ static double vpmb_tolerated_ambient_pressure(struct deco_state *ds, double refe
return ds->tissue_n2_sat[ci] + ds->tissue_he_sat[ci] + vpmb_config.other_gases_pressure - total_gradient;
}
extern "C" double tissue_tolerance_calc(struct deco_state *ds, const struct dive *dive, double pressure, bool in_planner)
double tissue_tolerance_calc(struct deco_state *ds, const struct dive *dive, double pressure, bool in_planner)
{
int ci = -1;
double ret_tolerance_limit_ambient_pressure = 0.0;
@ -323,7 +325,7 @@ static double calc_surface_phase(double surface_pressure, double he_pressure, do
return 0;
}
extern "C" void vpmb_start_gradient(struct deco_state *ds)
void vpmb_start_gradient(struct deco_state *ds)
{
int ci;
@ -333,7 +335,7 @@ extern "C" void vpmb_start_gradient(struct deco_state *ds)
}
}
extern "C" void vpmb_next_gradient(struct deco_state *ds, double deco_time, double surface_pressure, bool in_planner)
void vpmb_next_gradient(struct deco_state *ds, double deco_time, double surface_pressure, bool in_planner)
{
int ci;
double n2_b, n2_c;
@ -379,7 +381,7 @@ static double solve_cubic(double A, double B, double C)
}
extern "C" void nuclear_regeneration(struct deco_state *ds, double time)
void nuclear_regeneration(struct deco_state *ds, double time)
{
time /= 60.0;
int ci;
@ -411,7 +413,7 @@ static double calc_inner_pressure(double crit_radius, double onset_tension, doub
}
// Calculates the crushing pressure in the given moment. Updates crushing_onset_tension and critical radius if needed
extern "C" void calc_crushing_pressure(struct deco_state *ds, double pressure)
void calc_crushing_pressure(struct deco_state *ds, double pressure)
{
int ci;
double gradient;
@ -436,15 +438,16 @@ extern "C" void calc_crushing_pressure(struct deco_state *ds, double pressure)
n2_crushing_pressure = pressure - n2_inner_pressure;
he_crushing_pressure = pressure - he_inner_pressure;
}
ds->max_n2_crushing_pressure[ci] = std::max(ds->max_n2_crushing_pressure[ci], n2_crushing_pressure);
ds->max_he_crushing_pressure[ci] = std::max(ds->max_he_crushing_pressure[ci], he_crushing_pressure);
ds->max_n2_crushing_pressure[ci] = MAX(ds->max_n2_crushing_pressure[ci], n2_crushing_pressure);
ds->max_he_crushing_pressure[ci] = MAX(ds->max_he_crushing_pressure[ci], he_crushing_pressure);
}
ds->max_ambient_pressure = std::max(pressure, ds->max_ambient_pressure);
ds->max_ambient_pressure = MAX(pressure, ds->max_ambient_pressure);
}
/* add period_in_seconds at the given pressure and gas to the deco calculation */
extern "C" void add_segment(struct deco_state *ds, double pressure, struct gasmix gasmix, int period_in_seconds, int ccpo2, enum divemode_t divemode, int, bool in_planner)
void add_segment(struct deco_state *ds, double pressure, struct gasmix gasmix, int period_in_seconds, int ccpo2, enum divemode_t divemode, int sac, bool in_planner)
{
UNUSED(sac);
int ci;
struct gas_pressures pressures;
bool icd = false;
@ -476,7 +479,7 @@ extern "C" void add_segment(struct deco_state *ds, double pressure, struct gasmi
}
#if DECO_CALC_DEBUG
extern "C" void dump_tissues(struct deco_state *ds)
void dump_tissues(struct deco_state *ds)
{
int ci;
printf("N2 tissues:");
@ -489,7 +492,7 @@ extern "C" void dump_tissues(struct deco_state *ds)
}
#endif
extern "C" void clear_vpmb_state(struct deco_state *ds)
void clear_vpmb_state(struct deco_state *ds)
{
int ci;
for (ci = 0; ci < 16; ci++) {
@ -501,7 +504,7 @@ extern "C" void clear_vpmb_state(struct deco_state *ds)
ds->max_bottom_ceiling_pressure.mbar = 0;
}
extern "C" void clear_deco(struct deco_state *ds, double surface_pressure, bool in_planner)
void clear_deco(struct deco_state *ds, double surface_pressure, bool in_planner)
{
int ci;
@ -520,17 +523,19 @@ extern "C" void clear_deco(struct deco_state *ds, double surface_pressure, bool
ds->ci_pointing_to_guiding_tissue = -1;
}
void deco_state_cache::cache(const struct deco_state *src)
void cache_deco_state(struct deco_state *src, struct deco_state **cached_datap)
{
if (!data)
data = std::make_unique<deco_state>();
struct deco_state *data = *cached_datap;
if (!data) {
data = malloc(sizeof(struct deco_state));
*cached_datap = data;
}
*data = *src;
}
void deco_state_cache::restore(struct deco_state *target, bool keep_vpmb_state) const
void restore_deco_state(struct deco_state *data, struct deco_state *target, bool keep_vpmb_state)
{
if (!data)
return;
if (keep_vpmb_state) {
int ci;
for (ci = 0; ci < 16; ci++) {
@ -543,9 +548,10 @@ void deco_state_cache::restore(struct deco_state *target, bool keep_vpmb_state)
data->max_bottom_ceiling_pressure = target->max_bottom_ceiling_pressure;
}
*target = *data;
}
extern "C" int deco_allowed_depth(double tissues_tolerance, double surface_pressure, const struct dive *dive, bool smooth)
int deco_allowed_depth(double tissues_tolerance, double surface_pressure, const struct dive *dive, bool smooth)
{
int depth;
double pressure_delta;
@ -564,7 +570,7 @@ extern "C" int deco_allowed_depth(double tissues_tolerance, double surface_press
return depth;
}
extern "C" void set_gf(short gflow, short gfhigh)
void set_gf(short gflow, short gfhigh)
{
if (gflow != -1)
buehlmann_config.gf_low = (double)gflow / 100.0;
@ -572,7 +578,7 @@ extern "C" void set_gf(short gflow, short gfhigh)
buehlmann_config.gf_high = (double)gfhigh / 100.0;
}
extern "C" void set_vpmb_conservatism(short conservatism)
void set_vpmb_conservatism(short conservatism)
{
if (conservatism < 0)
vpmb_config.conservatism = 0;
@ -582,21 +588,21 @@ extern "C" void set_vpmb_conservatism(short conservatism)
vpmb_config.conservatism = conservatism;
}
extern "C" double get_gf(struct deco_state *ds, double ambpressure_bar, const struct dive *dive)
double get_gf(struct deco_state *ds, double ambpressure_bar, const struct dive *dive)
{
double surface_pressure_bar = get_surface_pressure_in_mbar(dive, true) / 1000.0;
double gf_low = buehlmann_config.gf_low;
double gf_high = buehlmann_config.gf_high;
double gf;
if (ds->gf_low_pressure_this_dive > surface_pressure_bar)
gf = std::max((double)gf_low, (ambpressure_bar - surface_pressure_bar) /
gf = MAX((double)gf_low, (ambpressure_bar - surface_pressure_bar) /
(ds->gf_low_pressure_this_dive - surface_pressure_bar) * (gf_low - gf_high) + gf_high);
else
gf = gf_low;
return gf;
}
extern "C" double regressiona(const struct deco_state *ds)
double regressiona(const struct deco_state *ds)
{
if (ds->sum1 > 1) {
double avxy = ds->sumxy / ds->sum1;
@ -609,7 +615,7 @@ extern "C" double regressiona(const struct deco_state *ds)
return 0.0;
}
extern "C" double regressionb(const struct deco_state *ds)
double regressionb(const struct deco_state *ds)
{
if (ds->sum1)
return ds->sumy / ds->sum1 - ds->sumx * regressiona(ds) / ds->sum1;
@ -617,14 +623,14 @@ extern "C" double regressionb(const struct deco_state *ds)
return 0.0;
}
extern "C" void reset_regression(struct deco_state *ds)
void reset_regression(struct deco_state *ds)
{
ds->sum1 = 0;
ds->sumxx = ds->sumx = 0L;
ds->sumy = ds->sumxy = 0.0;
}
extern "C" void update_regression(struct deco_state *ds, const struct dive *dive)
void update_regression(struct deco_state *ds, const struct dive *dive)
{
if (!ds->plot_depth)
return;

View File

@ -56,6 +56,8 @@ extern void clear_deco(struct deco_state *ds, double surface_pressure, bool in_p
extern void dump_tissues(struct deco_state *ds);
extern void set_gf(short gflow, short gfhigh);
extern void set_vpmb_conservatism(short conservatism);
extern void cache_deco_state(struct deco_state *source, struct deco_state **datap);
extern void restore_deco_state(struct deco_state *data, struct deco_state *target, bool keep_vpmb_state);
extern void nuclear_regeneration(struct deco_state *ds, double time);
extern void vpmb_start_gradient(struct deco_state *ds);
extern void vpmb_next_gradient(struct deco_state *ds, double deco_time, double surface_pressure, bool in_planner);
@ -72,21 +74,6 @@ extern void update_regression(struct deco_state *ds, const struct dive *dive);
#ifdef __cplusplus
}
// C++ only functions
#include <memory>
struct deco_state_cache {
// Test if there is cached data
operator bool () {
return !!data;
}
void cache(const struct deco_state *source);
void restore(struct deco_state *target, bool keep_vpmb_state) const;
private:
std::unique_ptr<deco_state> data;
};
#endif
#endif // DECO_H
View File
@ -4,7 +4,7 @@
#include "divelist.h"
#include "divelog.h"
#include "subsurface-string.h"
#include "errorhelper.h"
#include "errorhelper.h" // for verbose flag
#include "selection.h"
#include "core/settings/qPrefDiveComputer.h"
@ -60,9 +60,9 @@ void device::showchanges(const std::string &n) const
{
if (nickName != n) {
if (!n.empty())
report_info("new nickname %s for DC model %s serial %s", n.c_str(), model.c_str(), serialNumber.c_str());
qDebug("new nickname %s for DC model %s serial %s", n.c_str(), model.c_str(), serialNumber.c_str());
else
report_info("deleted nickname %s for DC model %s serial %s", nickName.c_str(), model.c_str(), serialNumber.c_str());
qDebug("deleted nickname %s for DC model %s serial %s", nickName.c_str(), model.c_str(), serialNumber.c_str());
}
}
View File
@ -339,20 +339,18 @@ extern "C" void selective_copy_dive(const struct dive *s, struct dive *d, struct
}
#undef CONDITIONAL_COPY_STRING
/* copies all events from the given dive computer before a given time
/* copies all events from all dive computers before a given time
this is used when editing a dive in the planner to preserve the events
of the old dive */
extern "C" void copy_events_until(const struct dive *sd, struct dive *dd, int dcNr, int time)
extern "C" void copy_events_until(const struct dive *sd, struct dive *dd, int time)
{
if (!sd || !dd)
return;
const struct divecomputer *s = &sd->dc;
struct divecomputer *d = get_dive_dc(dd, dcNr);
if (!s || !d)
return;
struct divecomputer *d = &dd->dc;
while (s && d) {
const struct event *ev;
ev = s->events;
while (ev != NULL) {
@ -361,6 +359,9 @@ extern "C" void copy_events_until(const struct dive *sd, struct dive *dd, int dc
add_event(d, ev->time.seconds, ev->type, ev->flags, ev->value, ev->name);
ev = ev->next;
}
s = s->next;
d = d->next;
}
}
extern "C" int nr_cylinders(const struct dive *dive)
@ -607,7 +608,7 @@ extern "C" int explicit_first_cylinder(const struct dive *dive, const struct div
if (ev && ((dc->sample && ev->time.seconds == dc->sample[0].time.seconds) || ev->time.seconds <= 1))
res = get_cylinder_index(dive, ev);
else if (dc->divemode == CCR)
res = std::max(get_cylinder_idx_by_use(dive, DILUENT), res);
res = MAX(get_cylinder_idx_by_use(dive, DILUENT), res);
}
return res < dive->cylinders.nr ? res : 0;
}
@ -657,7 +658,7 @@ extern "C" void update_setpoint_events(const struct dive *dive, struct divecompu
ev->value = new_setpoint;
} else {
if (!add_event(dc, 0, SAMPLE_EVENT_PO2, 0, new_setpoint, "SP change"))
report_info("Could not add setpoint change event");
fprintf(stderr, "Could not add setpoint change event\n");
}
}
@ -851,7 +852,7 @@ static void fixup_duration(struct dive *dive)
duration_t duration = { };
for_each_relevant_dc (dive, dc) {
duration.seconds = std::max(duration.seconds, dc->duration.seconds);
duration.seconds = MAX(duration.seconds, dc->duration.seconds);
}
dive->duration.seconds = duration.seconds;
}
@ -968,7 +969,7 @@ static void fixup_dc_depths(struct dive *dive, struct divecomputer *dc)
}
update_depth(&dc->maxdepth, maxdepth);
if (!is_logged(dive) || !is_dc_planner(dc))
if (!has_planned(dive, false) || !is_dc_planner(dc))
if (maxdepth > dive->maxdepth.mm)
dive->maxdepth.mm = maxdepth;
}
@ -1310,8 +1311,8 @@ extern "C" struct dive *fixup_dive(struct dive *dive)
}
/* Don't pick a zero for MERGE_MIN() */
#define MERGE_MAX(res, a, b, n) res->n = std::max(a->n, b->n)
#define MERGE_MIN(res, a, b, n) res->n = (a->n) ? (b->n) ? std::min(a->n, b->n) : (a->n) : (b->n)
#define MERGE_MAX(res, a, b, n) res->n = MAX(a->n, b->n)
#define MERGE_MIN(res, a, b, n) res->n = (a->n) ? (b->n) ? MIN(a->n, b->n) : (a->n) : (b->n)
#define MERGE_TXT(res, a, b, n, sep) res->n = merge_text(a->n, b->n, sep)
#define MERGE_NONZERO(res, a, b, n) res->n = a->n ? a->n : b->n
@ -2309,8 +2310,8 @@ static int likely_same_dive(const struct dive *a, const struct dive *b)
int match, fuzz = 20 * 60;
/* don't merge manually added dives with anything */
if (is_dc_manually_added_dive(&a->dc) ||
is_dc_manually_added_dive(&b->dc))
if (is_manually_added_dc(&a->dc) ||
is_manually_added_dc(&b->dc))
return 0;
/*
@ -2332,7 +2333,7 @@ static int likely_same_dive(const struct dive *a, const struct dive *b)
* Allow a time difference due to dive computer time
* setting etc. Check if they overlap.
*/
fuzz = std::max(a->duration.seconds, b->duration.seconds) / 2;
fuzz = MAX(a->duration.seconds, b->duration.seconds) / 2;
if (fuzz < 60)
fuzz = 60;
@ -2549,29 +2550,19 @@ static void join_dive_computers(struct dive *d, struct divecomputer *res,
remove_redundant_dc(res, prefer_downloaded);
}
static bool has_dc_type(const struct dive *dive, bool dc_is_planner)
// Does this dive have a dive computer for which is_dc_planner has value planned
extern "C" bool has_planned(const struct dive *dive, bool planned)
{
const struct divecomputer *dc = &dive->dc;
while (dc) {
if (is_dc_planner(dc) == dc_is_planner)
if (is_dc_planner(&dive->dc) == planned)
return true;
dc = dc->next;
}
return false;
}
// Does this dive have a dive computer for which is_dc_planner has value planned
extern "C" bool is_planned(const struct dive *dive)
{
return has_dc_type(dive, true);
}
extern "C" bool is_logged(const struct dive *dive)
{
return has_dc_type(dive, false);
}
/*
* Merging two dives can be subtle, because there's two different ways
* of merging:
@ -3072,7 +3063,7 @@ extern "C" struct dive *make_first_dc(const struct dive *d, int dc_number)
;
if (!dc) {
free(newdc);
report_info("data inconsistent: can't find the current DC");
fprintf(stderr, "data inconsistent: can't find the current DC");
return res;
}
dc->next = old_dc->next;
@ -3244,11 +3235,11 @@ extern "C" int depth_to_mbar(int depth, const struct dive *dive)
extern "C" double depth_to_mbarf(int depth, const struct dive *dive)
{
// For downloaded and planned dives, use DC's values
// To downloaded and planned dives, use DC's values
int salinity = dive->dc.salinity;
pressure_t surface_pressure = dive->dc.surface_pressure;
if (is_dc_manually_added_dive(&dive->dc)) { // For manual dives, salinity and pressure in another place...
if (is_manually_added_dc(&dive->dc)) { // To manual dives, salinity and pressure in another place...
surface_pressure = dive->surface_pressure;
salinity = dive->user_salinity;
}
@ -3271,8 +3262,8 @@ extern "C" double depth_to_atm(int depth, const struct dive *dive)
* take care of this, but the Uemis we support natively */
extern "C" int rel_mbar_to_depth(int mbar, const struct dive *dive)
{
// For downloaded and planned dives, use DC's salinity. Manual dives, use user's salinity
int salinity = is_dc_manually_added_dive(&dive->dc) ? dive->user_salinity : dive->dc.salinity;
// To downloaded and planned dives, use DC's salinity. Manual dives, use user's salinity
int salinity = is_manually_added_dc(&dive->dc) ? dive->user_salinity : dive->dc.salinity;
if (!salinity)
salinity = SEAWATER_SALINITY;
@ -3283,8 +3274,8 @@ extern "C" int rel_mbar_to_depth(int mbar, const struct dive *dive)
extern "C" int mbar_to_depth(int mbar, const struct dive *dive)
{
// For downloaded and planned dives, use DC's pressure. Manual dives, use user's pressure
pressure_t surface_pressure = is_dc_manually_added_dive(&dive->dc)
// To downloaded and planned dives, use DC's pressure. Manual dives, use user's pressure
pressure_t surface_pressure = is_manually_added_dc(&dive->dc)
? dive->surface_pressure
: dive->dc.surface_pressure;
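All three conversion helpers apply the same rule: trust the dive computer's salinity and surface pressure unless this is a manually added dive, in which case the user-entered values win, with sea water as the final fallback. A simplified sketch of that selection; the struct and fallback constants are assumptions for this example, not the Subsurface definitions:

struct conv_input {
	int dc_salinity, user_salinity;		/* 0 means "not set" */
	int dc_surface_mbar, user_surface_mbar;	/* 0 means "not set" */
	bool manually_added;
};

static void pick_conversion_inputs(const struct conv_input *in, int *salinity, int *surface_mbar)
{
	*salinity = in->manually_added ? in->user_salinity : in->dc_salinity;
	*surface_mbar = in->manually_added ? in->user_surface_mbar : in->dc_surface_mbar;
	if (!*salinity)
		*salinity = 10300;	/* assumed sea-water default, mirroring SEAWATER_SALINITY */
	if (!*surface_mbar)
		*surface_mbar = 1013;	/* assumed standard atmosphere */
}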
@ -3395,7 +3386,7 @@ extern "C" struct dive *get_dive_by_uniq_id(int id)
}
#ifdef DEBUG
if (dive == NULL) {
report_info("Invalid id %x passed to get_dive_by_diveid, try to fix the code", id);
fprintf(stderr, "Invalid id %x passed to get_dive_by_diveid, try to fix the code\n", id);
exit(1);
}
#endif
@ -3413,7 +3404,7 @@ extern "C" int get_idx_by_uniq_id(int id)
}
#ifdef DEBUG
if (dive == NULL) {
report_info("Invalid id %x passed to get_dive_by_diveid, try to fix the code", id);
fprintf(stderr, "Invalid id %x passed to get_dive_by_diveid, try to fix the code\n", id);
exit(1);
}
#endif

View File

@ -141,7 +141,8 @@ void split_divecomputer(const struct dive *src, int num, struct dive **out1, str
for (_dc = &_dive->dc; _dc; _dc = _dc->next)
#define for_each_relevant_dc(_dive, _dc) \
for (_dc = &_dive->dc; _dc; _dc = _dc->next) if (!is_logged(_dive) || !is_dc_planner(_dc))
bool _all_planned = !has_planned(_dive, false); \
for (_dc = &_dive->dc; _dc; _dc = _dc->next) if (_all_planned || !is_dc_planner(_dc))
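Hand-expanding the has_planned() variant of the macro makes the intent easier to see: planner dive computers are skipped, except when the dive consists of nothing but planned data. A rough expansion with simplified variable names, not the literal preprocessor output:

bool all_planned = !has_planned(dive, false);	/* no "real" logged dive computer at all */
for (struct divecomputer *dc = &dive->dc; dc; dc = dc->next) {
	if (!all_planned && is_dc_planner(dc))
		continue;			/* ignore planner DCs when logged data exists */
	/* ... per-divecomputer work ... */
}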
extern struct dive *get_dive_by_uniq_id(int id);
extern int get_idx_by_uniq_id(int id);
@ -186,7 +187,7 @@ extern int split_dive(const struct dive *dive, struct dive **new1, struct dive *
extern int split_dive_at_time(const struct dive *dive, duration_t time, struct dive **new1, struct dive **new2);
extern struct dive *merge_dives(const struct dive *a, const struct dive *b, int offset, bool prefer_downloaded, struct dive_trip **trip, struct dive_site **site);
extern struct dive *try_to_merge(struct dive *a, struct dive *b, bool prefer_downloaded);
extern void copy_events_until(const struct dive *sd, struct dive *dd, int dcNr, int time);
extern void copy_events_until(const struct dive *sd, struct dive *dd, int time);
extern void copy_used_cylinders(const struct dive *s, struct dive *d, bool used_only);
extern bool is_cylinder_used(const struct dive *dive, int idx);
extern bool is_cylinder_prot(const struct dive *dive, int idx);
@ -206,8 +207,7 @@ extern void invalidate_dive_cache(struct dive *dc);
extern int total_weight(const struct dive *);
extern bool is_planned(const struct dive *dive);
extern bool is_logged(const struct dive *dive);
extern bool has_planned(const struct dive *dive, bool planned);
/* Get gasmixes at increasing timestamps.
* In "evp", pass a pointer to a "struct event *" which is NULL-initialized on first invocation.

View File

@ -492,6 +492,11 @@ void add_extra_data(struct divecomputer *dc, const char *key, const char *value)
}
}
bool is_dc_planner(const struct divecomputer *dc)
{
return same_string(dc->model, "planned dive");
}
/*
* Match two dive computer entries against each other, and
* tell if it's the same dive. Return 0 if "don't know",
@ -543,27 +548,14 @@ void free_dc(struct divecomputer *dc)
free(dc);
}
static const char *planner_dc_name = "planned dive";
bool is_dc_planner(const struct divecomputer *dc)
static const char *manual_dc_name = "manually added dive";
bool is_manually_added_dc(const struct divecomputer *dc)
{
return dc && same_string(dc->model, planner_dc_name);
return dc && dc->samples <= 50 &&
same_string(dc->model, manual_dc_name);
}
void make_planner_dc(struct divecomputer *dc)
{
free((void *)dc->model);
dc->model = strdup(planner_dc_name);
}
const char *manual_dc_name = "manually added dive";
bool is_dc_manually_added_dive(const struct divecomputer *dc)
{
return dc && same_string(dc->model, manual_dc_name);
}
void make_manually_added_dive_dc(struct divecomputer *dc)
void make_manually_added_dc(struct divecomputer *dc)
{
free((void *)dc->model);
dc->model = strdup(manual_dc_name);
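Planner and manually added dive computers are recognized purely by their model string, and "making" one is just swapping that string. A self-contained sketch of the tag-and-test pattern; same_string() is assumed to be a NULL-safe strcmp wrapper, so plain strcmp is used here and the names are prefixed to mark them as examples:

#include <stdlib.h>
#include <string.h>

static const char *sketch_manual_name = "manually added dive";

static int sketch_is_manual(const char *model)
{
	return model && strcmp(model, sketch_manual_name) == 0;
}

/* returns the new model string; the caller stores it back into dc->model */
static char *sketch_make_manual(char *old_model)
{
	free(old_model);
	return strdup(sketch_manual_name);
}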

View File

@ -67,12 +67,10 @@ extern void add_event_to_dc(struct divecomputer *dc, struct event *ev);
extern struct event *add_event(struct divecomputer *dc, unsigned int time, int type, int flags, int value, const char *name);
extern void remove_event_from_dc(struct divecomputer *dc, struct event *event);
extern void add_extra_data(struct divecomputer *dc, const char *key, const char *value);
extern uint32_t calculate_string_hash(const char *str);
extern bool is_dc_planner(const struct divecomputer *dc);
extern void make_planner_dc(struct divecomputer *dc);
extern const char *manual_dc_name;
extern bool is_dc_manually_added_dive(const struct divecomputer *dc);
extern void make_manually_added_dive_dc(struct divecomputer *dc);
extern uint32_t calculate_string_hash(const char *str);
extern bool is_manually_added_dc(const struct divecomputer *dc);
extern void make_manually_added_dc(struct divecomputer *dc);
/* Check if two dive computer entries are the exact same dive (-1=no/0=maybe/1=yes) */
extern int match_one_dc(const struct divecomputer *a, const struct divecomputer *b);

View File

@ -561,7 +561,7 @@ int init_decompression(struct deco_state *ds, const struct dive *dive, bool in_p
}
add_segment(ds, surface_pressure, air, surface_time, 0, OC, prefs.decosac, in_planner);
#if DECO_CALC_DEBUG & 2
printf("Tissues after surface intervall of %d:%02u:\n", FRACTION_TUPLE(surface_time, 60));
printf("Tissues after surface intervall of %d:%02u:\n", FRACTION(surface_time, 60));
dump_tissues(ds);
#endif
}
@ -598,7 +598,7 @@ int init_decompression(struct deco_state *ds, const struct dive *dive, bool in_p
}
add_segment(ds, surface_pressure, air, surface_time, 0, OC, prefs.decosac, in_planner);
#if DECO_CALC_DEBUG & 2
printf("Tissues after surface intervall of %d:%02u:\n", FRACTION_TUPLE(surface_time, 60));
printf("Tissues after surface intervall of %d:%02u:\n", FRACTION(surface_time, 60));
dump_tissues(ds);
#endif
}
@ -767,6 +767,18 @@ struct dive *unregister_dive(int idx)
return dive;
}
/* this implements the mechanics of removing the dive from the global
* dive table and the trip, but doesn't deal with updating dive trips, etc */
void delete_single_dive(int idx)
{
struct dive *dive = get_dive(idx);
if (!dive)
return; /* this should never happen */
remove_dive_from_trip(dive, divelog.trips);
unregister_dive_from_dive_site(dive);
delete_dive_from_table(divelog.dives, idx);
}
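The order matters here: the dive is detached from its trip and dive site first, so removing it from the table never leaves dangling back-references. A usage sketch under that assumption, mirroring how divelog::clear() further down in this diff empties the table:

static void sketch_clear_all_dives(void)
{
	/* always delete index 0; the table shrinks by one on each iteration */
	while (divelog.dives->nr)
		delete_single_dive(0);
}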
void process_loaded_dives()
{
sort_dive_table(divelog.dives);
@ -977,7 +989,7 @@ void add_imported_dives(struct divelog *import_log, int flags)
/* Remove old dives */
for (i = 0; i < dives_to_remove.nr; i++) {
idx = get_divenr(dives_to_remove.dives[i]);
delete_single_dive(&divelog, idx);
delete_single_dive(idx);
}
dives_to_remove.nr = 0;
@ -1007,10 +1019,6 @@ void add_imported_dives(struct divelog *import_log, int flags)
current_dive = divelog.dives->nr > 0 ? divelog.dives->dives[divelog.dives->nr - 1] : NULL;
free_device_table(devices_to_add);
free(dives_to_add.dives);
free(dives_to_remove.dives);
free(trips_to_add.trips);
free(dive_sites_to_add.dive_sites);
/* Inform frontend of reset data. This should reset all the models. */
emit_reset_signal();

View File

@ -62,6 +62,7 @@ void clear_dive_file_data();
void clear_dive_table(struct dive_table *table);
void move_dive_table(struct dive_table *src, struct dive_table *dst);
struct dive *unregister_dive(int idx);
extern void delete_single_dive(int idx);
extern bool has_dive(unsigned int deviceid, unsigned int diveid);
#ifdef __cplusplus

View File

@ -3,7 +3,6 @@
#include "divelist.h"
#include "divesite.h"
#include "device.h"
#include "errorhelper.h"
#include "filterpreset.h"
#include "trip.h"
@ -64,28 +63,14 @@ struct divelog &divelog::operator=(divelog &&log)
return *this;
}
/* this implements the mechanics of removing the dive from the
* dive log and the trip, but doesn't deal with updating dive trips, etc */
void delete_single_dive(struct divelog *log, int idx)
{
if (idx < 0 || idx > log->dives->nr) {
report_info("Warning: deleting unexisting dive with index %d", idx);
return;
}
struct dive *dive = log->dives->dives[idx];
remove_dive_from_trip(dive, log->trips);
unregister_dive_from_dive_site(dive);
delete_dive_from_table(log->dives, idx);
}
void divelog::clear()
{
while (dives->nr > 0)
delete_single_dive(this, dives->nr - 1);
while (dives->nr)
delete_single_dive(0);
while (sites->nr)
delete_dive_site(get_dive_site(0, sites), sites);
if (trips->nr != 0) {
report_info("Warning: trip table not empty in divelog::clear()!");
fprintf(stderr, "Warning: trip table not empty in divelog::clear()!\n");
trips->nr = 0;
}
clear_device_table(devices);

View File

@ -34,7 +34,6 @@ extern "C" {
#endif
void clear_divelog(struct divelog *);
extern void delete_single_dive(struct divelog *, int idx);
#ifdef __cplusplus
}

View File

@ -4,7 +4,6 @@
#include "dive.h"
#include "divelist.h"
#include "divelog.h"
#include "errorhelper.h"
#include "membuffer.h"
#include "subsurface-string.h"
#include "table.h"
@ -372,17 +371,17 @@ void add_dive_to_dive_site(struct dive *d, struct dive_site *ds)
{
int idx;
if (!d) {
report_info("Warning: add_dive_to_dive_site called with NULL dive");
fprintf(stderr, "Warning: add_dive_to_dive_site called with NULL dive\n");
return;
}
if (!ds) {
report_info("Warning: add_dive_to_dive_site called with NULL dive site");
fprintf(stderr, "Warning: add_dive_to_dive_site called with NULL dive site\n");
return;
}
if (d->dive_site == ds)
return;
if (d->dive_site) {
report_info("Warning: adding dive that already belongs to a dive site to a different site");
fprintf(stderr, "Warning: adding dive that already belongs to a dive site to a different site\n");
unregister_dive_from_dive_site(d);
}
idx = dive_table_get_insertion_index(&ds->dives, d);

View File

@ -9,6 +9,7 @@
#include "errorhelper.h"
#include "subsurface-string.h"
#include "qthelper.h"
#include <QDebug>
#include <QJsonDocument>
#include <QJsonArray>
#include <QJsonObject>
@ -123,6 +124,7 @@ taxonomy_data reverseGeoLookup(degrees_t latitude, degrees_t longitude)
}
} else {
report_error("geonames.org did not provide reverse lookup information");
//qDebug() << "no reverse geo lookup; geonames returned\n" << fullReply;
}
return taxonomy;

View File

@ -1,10 +1,10 @@
#include "downloadfromdcthread.h"
#include "core/errorhelper.h"
#include "core/libdivecomputer.h"
#include "core/qthelper.h"
#include "core/range.h"
#include "core/settings/qPrefDiveComputer.h"
#include "core/divelist.h"
#include <QDebug>
#if defined(Q_OS_ANDROID)
#include "core/subsurface-string.h"
#endif
@ -91,7 +91,7 @@ void DownloadThread::run()
internalData->log = &log;
internalData->btname = strdup(m_data->devBluetoothName().toUtf8());
if (!internalData->descriptor) {
report_info("No download possible when DC type is unknown");
qDebug() << "No download possible when DC type is unknown";
return;
}
// get the list of transports that this device supports and filter depending on Bluetooth option
@ -103,8 +103,8 @@ void DownloadThread::run()
if (transports == DC_TRANSPORT_USBHID)
internalData->devname = "";
report_info("Starting download from %s", qPrintable(getTransportString(transports)));
report_info("downloading %s dives", internalData->force_download ? "all" : "only new");
qDebug() << "Starting download from " << getTransportString(transports);
qDebug() << "downloading" << (internalData->force_download ? "all" : "only new") << "dives";
clear_divelog(&log);
Q_ASSERT(internalData->log != nullptr);
@ -117,11 +117,11 @@ void DownloadThread::run()
errorText = do_libdivecomputer_import(internalData);
if (errorText) {
error = str_error(errorText, internalData->devname, internalData->vendor, internalData->product);
report_info("Finishing download thread: %s", qPrintable(error));
qDebug() << "Finishing download thread:" << error;
} else {
if (!log.dives->nr)
error = tr("No new dives downloaded from dive computer");
report_info("Finishing download thread: %d dives downloaded", log.dives->nr);
qDebug() << "Finishing download thread:" << log.dives->nr << "dives downloaded";
}
qPrefDiveComputer::set_vendor(internalData->vendor);
qPrefDiveComputer::set_product(internalData->product);
@ -193,7 +193,7 @@ void fill_computer_list()
void show_computer_list()
{
unsigned int transportMask = get_supported_transports(NULL);
report_info("Supported dive computers:");
qDebug() << "Supported dive computers:";
for (const QString &vendor: vendorList) {
QString msg = vendor + ": ";
for (const QString &product: productList[vendor]) {
@ -203,7 +203,7 @@ void show_computer_list()
msg += product + " (" + transportString + "), ";
}
msg.chop(2);
report_info("%s", qPrintable(msg));
qDebug() << msg;
}
}
@ -274,7 +274,7 @@ QString DCDeviceData::devBluetoothName() const
QString DCDeviceData::descriptor() const
{
return QString();
return "";
}
bool DCDeviceData::bluetoothMode() const

View File

@ -16,7 +16,6 @@
#include "dive.h"
#include "divelist.h"
#include "divelog.h"
#include "errorhelper.h"
#include "pref.h"
#include "subsurface-string.h"
#include "table.h"
@ -109,7 +108,7 @@ void add_tank_info_imperial(struct tank_info_table *table, const char *name, int
add_to_tank_info_table(table, table->nr, info);
}
static struct tank_info *get_tank_info(struct tank_info_table *table, const char *name)
extern struct tank_info *get_tank_info(struct tank_info_table *table, const char *name)
{
for (int i = 0; i < table->nr; ++i) {
if (same_string(table->infos[i].name, name))
@ -118,41 +117,34 @@ static struct tank_info *get_tank_info(struct tank_info_table *table, const char
return NULL;
}
extern void set_tank_info_data(struct tank_info_table *table, const char *name, volume_t size, pressure_t working_pressure)
extern void set_tank_info_size(struct tank_info_table *table, const char *name, volume_t size)
{
struct tank_info *info = get_tank_info(table, name);
if (info) {
if (info->ml != 0 || info->bar != 0) {
info->bar = working_pressure.mbar / 1000;
// Try to be smart about metric vs. imperial
if (info->cuft == 0 && info->psi == 0)
info->ml = size.mliter;
else
info->cuft = lrint(ml_to_cuft(size.mliter));
} else {
info->psi = lrint(to_PSI(working_pressure));
info->cuft = lrint(ml_to_cuft(size.mliter) * mbar_to_atm(working_pressure.mbar));
}
} else {
// Metric is a better choice as the volume is independent of the working pressure
add_tank_info_metric(table, name, size.mliter, working_pressure.mbar / 1000);
// By default add metric...?
add_tank_info_metric(table, name, size.mliter, 0);
}
}
extern void extract_tank_info(const struct tank_info *info, volume_t *size, pressure_t *working_pressure)
{
working_pressure->mbar = info->bar != 0 ? info->bar * 1000 : psi_to_mbar(info->psi);
if (info->ml != 0)
size->mliter = info->ml;
else if (working_pressure->mbar != 0)
size->mliter = lrint(cuft_to_l(info->cuft) * 1000 / mbar_to_atm(working_pressure->mbar));
}
extern bool get_tank_info_data(struct tank_info_table *table, const char *name, volume_t *size, pressure_t *working_pressure)
extern void set_tank_info_workingpressure(struct tank_info_table *table, const char *name, pressure_t working_pressure)
{
struct tank_info *info = get_tank_info(table, name);
if (info) {
extract_tank_info(info, size, working_pressure);
return true;
// Try to be smart about metric vs. imperial
if (info->cuft == 0 && info->psi == 0)
info->bar = working_pressure.mbar / 1000;
else
info->psi = lrint(mbar_to_PSI(working_pressure.mbar));
} else {
// By default add metric...?
add_tank_info_metric(table, name, 0, working_pressure.mbar / 1000);
}
return false;
}
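The metric-vs-imperial guess boils down to "if the cuft/psi fields are still zero, store the new value metrically, otherwise convert it". The conversions themselves are plain unit arithmetic; a stand-alone sketch using standard physical constants rather than values copied from Subsurface headers:

static double sketch_cuft_to_liter(double cuft)  { return cuft * 28.316846592; }
static double sketch_liter_to_cuft(double liter) { return liter / 28.316846592; }
static double sketch_psi_to_mbar(double psi)     { return psi * 68.94757293; }
static double sketch_mbar_to_psi(double mbar)    { return mbar / 68.94757293; }

/* e.g. an ~11.1 l cylinder at 207 bar holds roughly
 * sketch_liter_to_cuft(11.1) * (207000.0 / 1013.25), i.e. about 80 cuft of free gas,
 * which is why such a tank shows up as an "AL80" on the imperial side. */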
/* placeholders for a few functions that we need to redesign for the Qt UI */
@ -214,6 +206,13 @@ void add_cloned_weightsystem(struct weightsystem_table *t, weightsystem_t ws)
add_to_weightsystem_table(t, t->nr, clone_weightsystem(ws));
}
/* Add a clone of a weightsystem to the end of a weightsystem table.
* Cloned means that the description-string is copied. */
void add_cloned_weightsystem_at(struct weightsystem_table *t, weightsystem_t ws)
{
add_to_weightsystem_table(t, t->nr, clone_weightsystem(ws));
}
cylinder_t clone_cylinder(cylinder_t cyl)
{
cylinder_t res = cyl;
@ -461,7 +460,7 @@ cylinder_t *get_cylinder(const struct dive *d, int idx)
* in the table to mark no-cylinder surface intervals. This is horrendous. Fix ASAP. */
// if (idx < 0 || idx >= d->cylinders.nr) {
if (idx < 0 || idx >= d->cylinders.nr + 1 || idx >= d->cylinders.allocated) {
report_info("Warning: accessing invalid cylinder %d (%d existing)", idx, d->cylinders.nr);
fprintf(stderr, "Warning: accessing invalid cylinder %d (%d existing)\n", idx, d->cylinders.nr);
return NULL;
}
return &d->cylinders.cylinders[idx];
@ -470,7 +469,7 @@ cylinder_t *get_cylinder(const struct dive *d, int idx)
cylinder_t *get_or_create_cylinder(struct dive *d, int idx)
{
if (idx < 0) {
report_info("Warning: accessing invalid cylinder %d", idx);
fprintf(stderr, "Warning: accessing invalid cylinder %d\n", idx);
return NULL;
}
while (idx >= d->cylinders.nr)
@ -510,37 +509,11 @@ cylinder_t create_new_cylinder(const struct dive *d)
cylinder_t cyl = empty_cylinder;
fill_default_cylinder(d, &cyl);
cyl.start = cyl.type.workingpressure;
cyl.manually_added = true;
cyl.cylinder_use = OC_GAS;
return cyl;
}
cylinder_t create_new_manual_cylinder(const struct dive *d)
{
cylinder_t cyl = create_new_cylinder(d);
cyl.manually_added = true;
return cyl;
}
void add_default_cylinder(struct dive *d)
{
// Only add if there are no cylinders yet
if (d->cylinders.nr > 0)
return;
cylinder_t cyl;
if (!empty_string(prefs.default_cylinder)) {
cyl = create_new_cylinder(d);
} else {
cyl = empty_cylinder;
// roughly an AL80
cyl.type.description = strdup(translate("gettextFromC", "unknown"));
cyl.type.size.mliter = 11100;
cyl.type.workingpressure.mbar = 207000;
}
add_cylinder(&d->cylinders, 0, cyl);
reset_cylinders(d, false);
}
static bool show_cylinder(const struct dive *d, int i)
{
if (is_cylinder_used(d, i))

View File

@ -93,8 +93,7 @@ extern void reset_cylinders(struct dive *dive, bool track_gas);
extern int gas_volume(const cylinder_t *cyl, pressure_t p); /* Volume in mliter of a cylinder at pressure 'p' */
extern int find_best_gasmix_match(struct gasmix mix, const struct cylinder_table *cylinders);
extern void fill_default_cylinder(const struct dive *dive, cylinder_t *cyl); /* dive is needed to fill out MOD, which depends on salinity. */
extern cylinder_t create_new_manual_cylinder(const struct dive *dive); /* dive is needed to fill out MOD, which depends on salinity. */
extern void add_default_cylinder(struct dive *dive);
extern cylinder_t create_new_cylinder(const struct dive *dive); /* dive is needed to fill out MOD, which depends on salinity. */
extern int first_hidden_cylinder(const struct dive *d);
#ifdef DEBUG_CYL
extern void dump_cylinders(struct dive *dive, bool verbose);
@ -126,9 +125,9 @@ extern void reset_tank_info_table(struct tank_info_table *table);
extern void clear_tank_info_table(struct tank_info_table *table);
extern void add_tank_info_metric(struct tank_info_table *table, const char *name, int ml, int bar);
extern void add_tank_info_imperial(struct tank_info_table *table, const char *name, int cuft, int psi);
extern void extract_tank_info(const struct tank_info *info, volume_t *size, pressure_t *working_pressure);
extern bool get_tank_info_data(struct tank_info_table *table, const char *name, volume_t *size, pressure_t *pressure);
extern void set_tank_info_data(struct tank_info_table *table, const char *name, volume_t size, pressure_t working_pressure);
extern void set_tank_info_size(struct tank_info_table *table, const char *name, volume_t size);
extern void set_tank_info_workingpressure(struct tank_info_table *table, const char *name, pressure_t working_pressure);
extern struct tank_info *get_tank_info(struct tank_info_table *table, const char *name);
struct ws_info_t {
const char *name;

View File

@ -6,6 +6,7 @@
#include <stdarg.h>
#include "errorhelper.h"
#include "membuffer.h"
#include "qthelper.h"
#if !defined(Q_OS_ANDROID) && !defined(__ANDROID__)
#define LOG_MSG(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
@ -20,7 +21,7 @@ int verbose;
void report_info(const char *fmt, ...)
{
struct membufferpp buf;
struct membuffer buf = { 0 };
VA_BUF(&buf, fmt);
strip_mb(&buf);
@ -31,7 +32,7 @@ static void (*error_cb)(char *) = NULL;
int report_error(const char *fmt, ...)
{
struct membufferpp buf;
struct membuffer buf = { 0 };
VA_BUF(&buf, fmt);
strip_mb(&buf);

View File

@ -13,7 +13,6 @@
#include "dive.h"
#include "divelog.h"
#include "subsurface-string.h"
#include "format.h"
#include "errorhelper.h"
#include "file.h"
#include "git-access.h"
@ -125,7 +124,7 @@ static int try_to_open_db(const char *filename, std::string &mem, struct divelog
retval = sqlite3_open(filename, &handle);
if (retval) {
report_info("Database connection failed '%s'", filename);
fprintf(stderr, "Database connection failed '%s'.\n", filename);
return 1;
}
@ -251,14 +250,14 @@ static int parse_file_buffer(const char *filename, std::string &mem, struct dive
return parse_xml_buffer(filename, mem.data(), mem.size(), log, NULL);
}
bool remote_repo_uptodate(const char *filename, struct git_info *info)
extern "C" bool remote_repo_uptodate(const char *filename, struct git_info *info)
{
std::string current_sha = saved_git_id;
if (is_git_repository(filename, info) && open_git_repository(info)) {
std::string sha = get_sha(info->repo, info->branch);
if (!sha.empty() && current_sha == sha) {
report_info("already have loaded SHA %s - don't load again", sha.c_str());
fprintf(stderr, "already have loaded SHA %s - don't load again\n", sha.c_str());
return true;
}
}
@ -279,11 +278,14 @@ extern "C" int parse_file(const char *filename, struct divelog *log)
* Opening the cloud storage repository failed for some reason
* give up here and don't send errors about git repositories
*/
if (info.is_subsurface_cloud)
if (info.is_subsurface_cloud) {
cleanup_git_info(&info);
return -1;
}
}
int ret = git_load_dives(&info, log);
cleanup_git_info(&info);
return ret;
}
@ -314,7 +316,7 @@ extern "C" int parse_file(const char *filename, struct divelog *log)
std::string wl_name = std::string(filename, t - filename) + ".add";
auto [wl_mem, err] = readfile(wl_name.c_str());
if (err < 0) {
report_info("No file %s found. No WLog extensions.", wl_name.c_str());
fprintf(stderr, "No file %s found. No WLog extensions.\n", wl_name.c_str());
wl_mem.clear();
}
return datatrak_import(mem, wl_mem, log);

View File

@ -644,7 +644,7 @@ std::string filter_constraint_data_to_string(const filter_constraint *c)
void filter_constraint_set_stringlist(filter_constraint &c, const QString &s)
{
if (!filter_constraint_is_string(c.type)) {
report_info("Setting strings in non-string constraint!");
fprintf(stderr, "Setting strings in non-string constraint!\n");
return;
}
c.data.string_list->clear();
@ -655,7 +655,7 @@ void filter_constraint_set_stringlist(filter_constraint &c, const QString &s)
void filter_constraint_set_timestamp_from(filter_constraint &c, timestamp_t from)
{
if (!filter_constraint_is_timestamp(c.type)) {
report_info("Setting timestamp from in non-timestamp constraint!");
fprintf(stderr, "Setting timestamp from in non-timestamp constraint!\n");
return;
}
c.data.timestamp_range.from = from;
@ -664,7 +664,7 @@ void filter_constraint_set_timestamp_from(filter_constraint &c, timestamp_t from
void filter_constraint_set_timestamp_to(filter_constraint &c, timestamp_t to)
{
if (!filter_constraint_is_timestamp(c.type)) {
report_info("Setting timestamp to in non-timestamp constraint!");
fprintf(stderr, "Setting timestamp to in non-timestamp constraint!\n");
return;
}
c.data.timestamp_range.to = to;
@ -673,7 +673,7 @@ void filter_constraint_set_timestamp_to(filter_constraint &c, timestamp_t to)
void filter_constraint_set_integer_from(filter_constraint &c, int from)
{
if (!is_numerical_constraint(c.type)) {
report_info("Setting integer from of non-numerical constraint!");
fprintf(stderr, "Setting integer from of non-numerical constraint!\n");
return;
}
c.data.numerical_range.from = from;
@ -682,7 +682,7 @@ void filter_constraint_set_integer_from(filter_constraint &c, int from)
void filter_constraint_set_integer_to(filter_constraint &c, int to)
{
if (!is_numerical_constraint(c.type)) {
report_info("Setting integer to of non-numerical constraint!");
fprintf(stderr, "Setting integer to of non-numerical constraint!\n");
return;
}
c.data.numerical_range.to = to;
@ -691,7 +691,7 @@ void filter_constraint_set_integer_to(filter_constraint &c, int to)
void filter_constraint_set_float_from(filter_constraint &c, double from)
{
if (!is_numerical_constraint(c.type)) {
report_info("Setting float from of non-numerical constraint!");
fprintf(stderr, "Setting float from of non-numerical constraint!\n");
return;
}
c.data.numerical_range.from = display_to_base_unit(from, c.type);
@ -700,7 +700,7 @@ void filter_constraint_set_float_from(filter_constraint &c, double from)
void filter_constraint_set_float_to(filter_constraint &c, double to)
{
if (!is_numerical_constraint(c.type)) {
report_info("Setting float to of non-numerical constraint!");
fprintf(stderr, "Setting float to of non-numerical constraint!\n");
return;
}
c.data.numerical_range.to = display_to_base_unit(to, c.type);
@ -709,7 +709,7 @@ void filter_constraint_set_float_to(filter_constraint &c, double to)
void filter_constraint_set_multiple_choice(filter_constraint &c, uint64_t multiple_choice)
{
if (!filter_constraint_is_multiple_choice(c.type)) {
report_info("Setting multiple-choice to of non-multiple-choice constraint!");
fprintf(stderr, "Setting multiple-choice to of non-multiple-choice constraint!\n");
return;
}
c.data.multiple_choice = multiple_choice;
@ -718,7 +718,7 @@ void filter_constraint_set_multiple_choice(filter_constraint &c, uint64_t multip
QString filter_constraint_get_string(const filter_constraint &c)
{
if (!filter_constraint_is_string(c.type)) {
report_info("Getting string of non-string constraint!");
fprintf(stderr, "Getting string of non-string constraint!\n");
return QString();
}
return c.data.string_list->join(",");
@ -727,7 +727,7 @@ QString filter_constraint_get_string(const filter_constraint &c)
int filter_constraint_get_integer_from(const filter_constraint &c)
{
if (!is_numerical_constraint(c.type)) {
report_info("Getting integer from of non-numerical constraint!");
fprintf(stderr, "Getting integer from of non-numerical constraint!\n");
return -1;
}
return c.data.numerical_range.from;
@ -736,7 +736,7 @@ int filter_constraint_get_integer_from(const filter_constraint &c)
int filter_constraint_get_integer_to(const filter_constraint &c)
{
if (!is_numerical_constraint(c.type)) {
report_info("Getting integer to of non-numerical constraint!");
fprintf(stderr, "Getting integer to of non-numerical constraint!\n");
return -1;
}
return c.data.numerical_range.to;
@ -745,7 +745,7 @@ int filter_constraint_get_integer_to(const filter_constraint &c)
double filter_constraint_get_float_from(const filter_constraint &c)
{
if (!is_numerical_constraint(c.type)) {
report_info("Getting float from of non-numerical constraint!");
fprintf(stderr, "Getting float from of non-numerical constraint!\n");
return 0.0;
}
return base_to_display_unit(c.data.numerical_range.from, c.type);
@ -754,7 +754,7 @@ double filter_constraint_get_float_from(const filter_constraint &c)
double filter_constraint_get_float_to(const filter_constraint &c)
{
if (!is_numerical_constraint(c.type)) {
report_info("Getting float to of non-numerical constraint!");
fprintf(stderr, "Getting float to of non-numerical constraint!\n");
return 0.0;
}
return base_to_display_unit(c.data.numerical_range.to, c.type);
@ -763,7 +763,7 @@ double filter_constraint_get_float_to(const filter_constraint &c)
timestamp_t filter_constraint_get_timestamp_from(const filter_constraint &c)
{
if (!filter_constraint_is_timestamp(c.type)) {
report_info("Getting timestamp from of non-timestamp constraint!");
fprintf(stderr, "Getting timestamp from of non-timestamp constraint!\n");
return 0;
}
return c.data.timestamp_range.from;
@ -772,7 +772,7 @@ timestamp_t filter_constraint_get_timestamp_from(const filter_constraint &c)
timestamp_t filter_constraint_get_timestamp_to(const filter_constraint &c)
{
if (!filter_constraint_is_timestamp(c.type)) {
report_info("Getting timestamp to of non-timestamp constraint!");
fprintf(stderr, "Getting timestamp to of non-timestamp constraint!\n");
return 0;
}
return c.data.timestamp_range.to;
@ -781,7 +781,7 @@ timestamp_t filter_constraint_get_timestamp_to(const filter_constraint &c)
uint64_t filter_constraint_get_multiple_choice(const filter_constraint &c)
{
if (!filter_constraint_is_multiple_choice(c.type)) {
report_info("Getting multiple-choice of non-multiple choice constraint!");
fprintf(stderr, "Getting multiple-choice of non-multiple choice constraint!\n");
return 0;
}
return c.data.multiple_choice;
@ -819,7 +819,7 @@ static bool has_tags(const filter_constraint &c, const struct dive *d)
{
QStringList dive_tags;
for (const tag_entry *tag = d->tag_list; tag; tag = tag->next)
dive_tags.push_back(QString::fromStdString(tag->tag->name).trimmed());
dive_tags.push_back(QString(tag->tag->name).trimmed());
dive_tags.append(gettextFromC::tr(divemode_text_ui[d->dc.divemode]).trimmed());
return check(c, dive_tags);
}
@ -1074,9 +1074,9 @@ bool filter_constraint_match_dive(const filter_constraint &c, const struct dive
case FILTER_CONSTRAINT_SAC:
return check_numerical_range_non_zero(c, d->sac);
case FILTER_CONSTRAINT_LOGGED:
return is_logged(d) != c.negate;
return has_planned(d, false) != c.negate;
case FILTER_CONSTRAINT_PLANNED:
return is_planned(d) != c.negate;
return has_planned(d, true) != c.negate;
case FILTER_CONSTRAINT_DIVE_MODE:
return check_multiple_choice(c, (int)d->dc.divemode); // should we be smarter and check all DCs?
case FILTER_CONSTRAINT_TAGS:

View File

@ -343,30 +343,67 @@ QString vqasprintf_loc(const char *fmt, va_list ap_in)
return ret;
}
// TODO: Avoid back-and-forth conversion between UTF16 and UTF8.
std::string casprintf_loc(const char *cformat, ...)
// Put a formatted string respecting the default locale into a C-style array in UTF-8 encoding.
// The only complication arises from the fact that we don't want to cut through multi-byte UTF-8 code points.
extern "C" int snprintf_loc(char *dst, size_t size, const char *cformat, ...)
{
va_list ap;
va_start(ap, cformat);
QByteArray utf8 = vqasprintf_loc(cformat, ap).toUtf8();
va_end(ap);
return std::string(utf8.constData(), utf8.size());
}
std::string __printf(1, 2) format_string_std(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
size_t stringsize = vsnprintf(NULL, 0, fmt, ap);
va_end(ap);
if (stringsize == 0)
return std::string();
std::string res;
res.resize(stringsize); // Pointless clearing, oh my.
// This overwrites the terminal null-byte of std::string.
// That's probably "undefined behavior". Oh my.
va_start(ap, fmt);
vsnprintf(res.data(), stringsize + 1, fmt, ap);
int res = vsnprintf_loc(dst, size, cformat, ap);
va_end(ap);
return res;
}
extern "C" int vsnprintf_loc(char *dst, size_t size, const char *cformat, va_list ap)
{
QByteArray utf8 = vqasprintf_loc(cformat, ap).toUtf8();
const char *data = utf8.constData();
size_t utf8_size = utf8.size();
if (size == 0)
return utf8_size;
if (size < utf8_size + 1) {
memcpy(dst, data, size - 1);
if ((data[size - 1] & 0xC0) == 0x80) {
// We truncated a multi-byte UTF-8 encoding.
--size;
// Jump to last copied byte.
if (size > 0)
--size;
while(size > 0 && (dst[size] & 0xC0) == 0x80)
--size;
dst[size] = 0;
} else {
dst[size - 1] = 0;
}
} else {
memcpy(dst, data, utf8_size + 1); // QByteArray guarantees a trailing 0
}
return utf8_size;
}
int asprintf_loc(char **dst, const char *cformat, ...)
{
va_list ap;
va_start(ap, cformat);
int res = vasprintf_loc(dst, cformat, ap);
va_end(ap);
return res;
}
int vasprintf_loc(char **dst, const char *cformat, va_list ap)
{
QByteArray utf8 = vqasprintf_loc(cformat, ap).toUtf8();
*dst = strdup(utf8.constData());
return utf8.size();
}
extern "C" void put_vformat_loc(struct membuffer *b, const char *fmt, va_list args)
{
QByteArray utf8 = vqasprintf_loc(fmt, args).toUtf8();
const char *data = utf8.constData();
size_t utf8_size = utf8.size();
make_room(b, utf8_size);
memcpy(b->buffer + b->len, data, utf8_size);
b->len += utf8_size;
}
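The tricky part of vsnprintf_loc() is the truncation path: when the UTF-8 output does not fit, it backs up over continuation bytes (bytes matching 10xxxxxx, i.e. (b & 0xC0) == 0x80) so a multi-byte code point is never cut in half. A stand-alone sketch of just that logic, operating on a plain buffer:

#include <string.h>

static void copy_utf8_truncated(char *dst, size_t size, const char *src)
{
	size_t len = strlen(src);
	if (size == 0)
		return;
	if (len < size) {
		memcpy(dst, src, len + 1);	/* fits, including the NUL */
		return;
	}
	size_t n = size - 1;			/* leave room for the terminating NUL */
	while (n > 0 && (src[n] & 0xC0) == 0x80)
		--n;				/* step back out of a partial code point */
	memcpy(dst, src, n);
	dst[n] = '\0';
}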

View File

@ -11,8 +11,19 @@
#include <QString>
__printf(1, 2) QString qasprintf_loc(const char *cformat, ...);
__printf(1, 0) QString vqasprintf_loc(const char *cformat, va_list ap);
__printf(1, 2) std::string casprintf_loc(const char *cformat, ...);
__printf(1, 2) std::string format_string_std(const char *fmt, ...);
#endif
#ifdef __cplusplus
extern "C" {
#endif
__printf(3, 4) int snprintf_loc(char *dst, size_t size, const char *cformat, ...);
__printf(3, 0) int vsnprintf_loc(char *dst, size_t size, const char *cformat, va_list ap);
__printf(2, 3) int asprintf_loc(char **dst, const char *cformat, ...);
__printf(2, 0) int vasprintf_loc(char **dst, const char *cformat, va_list ap);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -128,7 +128,7 @@ static std::vector<QString> getWords(const dive *d)
tokenize(QString(d->buddy), res);
tokenize(QString(d->suit), res);
for (const tag_entry *tag = d->tag_list; tag; tag = tag->next)
tokenize(QString::fromStdString(tag->tag->name), res);
tokenize(QString(tag->tag->name), res);
for (int i = 0; i < d->cylinders.nr; ++i) {
const cylinder_t &cyl = *get_cylinder(d, i);
tokenize(QString(cyl.type.description), res);

View File

@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "gas.h"
#include "pref.h"
#include "errorhelper.h"
#include "gettext.h"
#include <stdio.h>
#include <string.h>
@ -60,7 +59,7 @@ void sanitize_gasmix(struct gasmix *mix)
/* Sane mix? */
if (o2 <= 1000 && he <= 1000 && o2 + he <= 1000)
return;
report_info("Odd gasmix: %u O2 %u He", o2, he);
fprintf(stderr, "Odd gasmix: %u O2 %u He\n", o2, he);
memset(mix, 0, sizeof(*mix));
}
@ -118,7 +117,7 @@ int pscr_o2(const double amb_pressure, struct gasmix mix)
* *pressures = structure for communicating o2 sensor values from and gas pressures to the calling function.
* *mix = structure containing cylinder gas mixture information.
* divemode = the dive mode pertaining to this point in the dive profile.
* This function is called by: calculate_gas_information_new() in profile.cpp; add_segment() in deco.cpp.
* This function is called by: calculate_gas_information_new() in profile.c; add_segment() in deco.c.
*/
void fill_pressures(struct gas_pressures *pressures, const double amb_pressure, struct gasmix mix, double po2, enum divemode_t divemode)
{

View File

@ -2,9 +2,9 @@
/* gaspressures.c
* ---------------
* This file contains the routines to calculate the gas pressures in the cylinders.
* The functions below support the code in profile.cpp.
* The functions below support the code in profile.c.
* The high-level function is populate_pressure_information(), called by function
* create_plot_info_new() in profile.cpp. The other functions below are, in turn,
* create_plot_info_new() in profile.c. The other functions below are, in turn,
* called by populate_pressure_information(). The calling sequence is as follows:
*
* populate_pressure_information() -> calc_pressure_time()
@ -102,8 +102,8 @@ static void dump_pr_track(int cyl, pr_track_t *track_pr)
printf(" start %f end %f t_start %d:%02d t_end %d:%02d pt %d\n",
mbar_to_PSI(list->start),
mbar_to_PSI(list->end),
FRACTION_TUPLE(list->t_start, 60),
FRACTION_TUPLE(list->t_end, 60),
FRACTION(list->t_start, 60),
FRACTION(list->t_end, 60),
list->pressure_time);
list = list->next;
}
@ -362,7 +362,7 @@ static void debug_print_pressures(struct plot_info *pi)
* pr_track_alloc structures for each cylinder. These pr_track_alloc structures ultimately allow for filling
* the missing tank pressure values on the dive profile using the depth_pressure of the dive. To do this, it
calculates the summed pressure-time value for the duration of the dive and stores these in the pr_track_alloc
* structures. This function is called by create_plot_info_new() in profile.cpp
* structures. This function is called by create_plot_info_new() in profile.c
*/
void populate_pressure_information(const struct dive *dive, const struct divecomputer *dc, struct plot_info *pi, int sensor)
{

View File

@ -17,13 +17,10 @@
#include <fcntl.h>
#include <stdarg.h>
#include <git2.h>
#include <QString>
#include <QRegularExpression>
#include <QNetworkProxy>
#include "subsurface-string.h"
#include "format.h"
#include "membuffer.h"
#include "strndup.h"
#include "qthelper.h"
#include "file.h"
#include "errorhelper.h"
@ -120,20 +117,12 @@ static int transfer_progress_cb(const git_transfer_progress *stats, void *)
// the initial push to sync the repos is mapped to 10% of overall progress
static int push_transfer_progress_cb(unsigned int current, unsigned int total, size_t, void *)
{
std::string buf = casprintf_loc(translate("gettextFromC", "Transfer to storage (%d/%d)"), current, total);
return git_storage_update_progress(buf.c_str());
char buf[80];
snprintf(buf, sizeof(buf), translate("gettextFromC", "Transfer to storage (%d/%d)"), current, total);
return git_storage_update_progress(buf);
}
std::string normalize_cloud_name(const std::string &remote_in)
{
// replace ssrf-cloud-XX.subsurface... names with cloud.subsurface... names
// that trailing '/' is to match old code
QString ri = QString::fromStdString(remote_in);
ri.replace(QRegularExpression(CLOUD_HOST_PATTERN), CLOUD_HOST_GENERIC "/");
return ri.toStdString();
}
std::string get_local_dir(const std::string &url, const std::string &branch)
extern "C" char *get_local_dir(const char *url, const char *branch)
{
SHA_CTX ctx;
unsigned char hash[20];
@ -144,6 +133,9 @@ std::string get_local_dir(const std::string &url, const std::string &branch)
// which server to pick changed, or because the user is on a different continent),
// then the hash and therefore the local directory would change. To prevent that
// from happening, normalize the cloud string to always use the old default name.
// That's trivial with QString operations and painful to do right in plain C, so
// let's be lazy and call a C++ helper function
// just remember to free the string we get back
std::string remote = normalize_cloud_name(url);
// That zero-byte update is so that we don't get hash
@ -151,18 +143,19 @@ std::string get_local_dir(const std::string &url, const std::string &branch)
SHA1_Init(&ctx);
SHA1_Update(&ctx, remote.c_str(), remote.size());
SHA1_Update(&ctx, "", 1);
SHA1_Update(&ctx, branch.c_str(), branch.size());
SHA1_Update(&ctx, branch, strlen(branch));
SHA1_Final(hash, &ctx);
return format_string_std("%s/cloudstorage/%02x%02x%02x%02x%02x%02x%02x%02x",
return format_string("%s/cloudstorage/%02x%02x%02x%02x%02x%02x%02x%02x",
system_default_directory(),
hash[0], hash[1], hash[2], hash[3],
hash[4], hash[5], hash[6], hash[7]);
}
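The local cache directory is derived from a SHA-1 over "url NUL branch"; only the first eight bytes of the digest end up in the directory name. A self-contained sketch of that derivation using the same OpenSSL SHA-1 calls as the hunk above, with the path assembly reduced to the hex suffix:

#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>

/* fills out[] with the 16-hex-digit suffix used for the cache directory */
static void cache_dir_suffix(const char *url, const char *branch, char out[17])
{
	unsigned char hash[SHA_DIGEST_LENGTH];
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, url, strlen(url));
	SHA1_Update(&ctx, "", 1);		/* NUL separator, as in get_local_dir() */
	SHA1_Update(&ctx, branch, strlen(branch));
	SHA1_Final(hash, &ctx);

	snprintf(out, 17, "%02x%02x%02x%02x%02x%02x%02x%02x",
		 hash[0], hash[1], hash[2], hash[3],
		 hash[4], hash[5], hash[6], hash[7]);
}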
static std::string move_local_cache(struct git_info *info)
static char *move_local_cache(struct git_info *info)
{
std::string old_path = get_local_dir(info->url, info->branch);
std::string new_path = move_away(old_path);
char *old_path = get_local_dir(info->url, info->branch);
char *new_path = move_away(old_path);
free(old_path);
return new_path;
}
@ -255,12 +248,15 @@ extern "C" int credential_ssh_cb(git_cred **out,
// TODO: We need a way to differentiate between password and private key authentication
if (allowed_types & GIT_CREDTYPE_SSH_KEY) {
std::string priv_key = std::string(system_default_directory()) + "/ssrf_remote.key";
if (!access(priv_key.c_str(), F_OK)) {
char *priv_key = format_string("%s/%s", system_default_directory(), "ssrf_remote.key");
if (!access(priv_key, F_OK)) {
if (exceeded_auth_attempts())
return GIT_EUSER;
return git_cred_ssh_key_new(out, username, NULL, priv_key.c_str(), passphrase);
int ret = git_cred_ssh_key_new(out, username, NULL, priv_key, passphrase);
free(priv_key);
return ret;
}
free(priv_key);
}
if (allowed_types & GIT_CREDTYPE_USERPASS_PLAINTEXT) {
@ -350,7 +346,7 @@ static int try_to_git_merge(struct git_info *info, git_reference **local_p, git_
git_commit *local_commit, *remote_commit, *base_commit;
git_index *merged_index;
git_merge_options merge_options;
struct membufferpp msg;
struct membuffer msg = { 0, 0, NULL};
if (verbose) {
char outlocal[41], outremote[41];
@ -454,6 +450,7 @@ static int try_to_git_merge(struct git_info *info, git_reference **local_p, git_
git_signature_free(author);
if (verbose)
report_info("git storage: successfully merged repositories");
free_buffer(&msg);
return 0;
}
@ -461,6 +458,7 @@ diverged_error:
return report_error("%s", translate("gettextFromC", "Remote storage and local data diverged"));
write_error:
free_buffer(&msg);
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: writing the data failed (%s)"), giterr_last()->message);
}
@ -469,10 +467,11 @@ write_error:
// and ask them to retry the operation (which will then refresh the data from the cloud server)
static int cleanup_local_cache(struct git_info *info)
{
std::string backup_path = move_local_cache(info);
report_info("git storage: problems with local cache, moved to %s", backup_path.c_str());
char *backup_path = move_local_cache(info);
report_info("git storage: problems with local cache, moved to %s", backup_path);
report_error("%s", translate("gettextFromC", "Problems with local cache of Subsurface cloud data"));
report_error(translate("gettextFromC", "Moved cache data to %s. Please try the operation again."), backup_path.c_str());
report_error(translate("gettextFromC", "Moved cache data to %s. Please try the operation again."), backup_path);
free(backup_path);
return -1;
}
@ -577,17 +576,17 @@ static int check_remote_status(struct git_info *info, git_remote *origin)
if (verbose)
report_info("git storage: check remote status\n");
if (git_branch_lookup(&local_ref, info->repo, info->branch.c_str(), GIT_BRANCH_LOCAL)) {
report_info("git storage: branch %s is missing in local repo", info->branch.c_str());
if (git_branch_lookup(&local_ref, info->repo, info->branch, GIT_BRANCH_LOCAL)) {
report_info("git storage: branch %s is missing in local repo", info->branch);
if (info->is_subsurface_cloud)
return cleanup_local_cache(info);
else
return report_error("Git cache branch %s no longer exists", info->branch.c_str());
return report_error("Git cache branch %s no longer exists", info->branch);
}
if (git_branch_upstream(&remote_ref, local_ref)) {
/* so there is no upstream branch for our branch; that's a problem.
* let's push our branch */
report_info("git storage: branch %s is missing in remote, pushing branch", info->branch.c_str());
report_info("git storage: branch %s is missing in remote, pushing branch", info->branch);
git_strarray refspec;
git_reference_list(&refspec, info->repo);
git_push_options opts = GIT_PUSH_OPTIONS_INIT;
@ -609,69 +608,62 @@ static int check_remote_status(struct git_info *info, git_remote *origin)
return error;
}
static std::string getProxyString()
{
if (prefs.proxy_type == QNetworkProxy::HttpProxy) {
if (prefs.proxy_auth)
return format_string_std("http://%s:%s@%s:%d", prefs.proxy_user, prefs.proxy_pass,
prefs.proxy_host, prefs.proxy_port);
else
return format_string_std("http://%s:%d", prefs.proxy_host, prefs.proxy_port);
}
return std::string();
}
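The proxy setting handed to libgit2 is just an http URL in the usual "http://[user:pass@]host:port" form. A sketch of that assembly without the preferences plumbing; the QNetworkProxy and prefs.* checks are elided:

#include <string>

static std::string make_proxy_url(const std::string &host, int port,
				  const std::string &user = std::string(),
				  const std::string &pass = std::string())
{
	if (!user.empty())
		return "http://" + user + ":" + pass + "@" + host + ":" + std::to_string(port);
	return "http://" + host + ":" + std::to_string(port);
}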
/* this is (so far) only used by the git storage tests to remove a remote branch
* it will print out errors, but not return an error (as this isn't a function that
* we test as part of the tests, it's a helper to not leave loads of dead branches on
* the server)
*/
void delete_remote_branch(git_repository *repo, const std::string &remote, const std::string &branch)
extern "C" void delete_remote_branch(git_repository *repo, const char *remote, const char *branch)
{
int error;
char *proxy_string;
git_remote *origin;
git_config *conf;
/* set up the config and proxy information in order to connect to the server */
git_repository_config(&conf, repo);
std::string proxy_string = getProxyString();
if (!proxy_string.empty()) {
git_config_set_string(conf, "http.proxy", proxy_string.c_str());
if (getProxyString(&proxy_string)) {
git_config_set_string(conf, "http.proxy", proxy_string);
free(proxy_string);
} else {
git_config_delete_entry(conf, "http.proxy");
}
if (git_remote_lookup(&origin, repo, "origin")) {
report_info("git storage: repository '%s' origin lookup failed (%s)", remote.c_str(), giterr_last() ? giterr_last()->message : "(unspecified)");
report_info("git storage: repository '%s' origin lookup failed (%s)", remote, giterr_last() ? giterr_last()->message : "(unspecified)");
return;
}
/* fetch the remote state */
git_fetch_options f_opts = GIT_FETCH_OPTIONS_INIT;
auth_attempt = 0;
f_opts.callbacks.credentials = credential_https_cb;
if (git_remote_fetch(origin, NULL, &f_opts, NULL)) {
error = git_remote_fetch(origin, NULL, &f_opts, NULL);
if (error) {
report_info("git storage: remote fetch failed (%s)\n", giterr_last() ? giterr_last()->message : "authentication failed");
return;
}
/* delete the remote branch by pushing to ":refs/heads/<branch>" */
git_strarray refspec;
std::string branch_ref = std::string(":refs/heads/") + branch;
char *dummy = branch_ref.data();
char *branch_ref = format_string(":refs/heads/%s", branch);
refspec.count = 1;
refspec.strings = &dummy;
refspec.strings = &branch_ref;
git_push_options p_opts = GIT_PUSH_OPTIONS_INIT;
auth_attempt = 0;
p_opts.callbacks.credentials = credential_https_cb;
if (git_remote_push(origin, &refspec, &p_opts)) {
report_info("git storage: unable to delete branch '%s'", branch.c_str());
error = git_remote_push(origin, &refspec, &p_opts);
free(branch_ref);
if (error) {
report_info("git storage: unable to delete branch '%s'", branch);
report_info("git storage: error was (%s)\n", giterr_last() ? giterr_last()->message : "(unspecified)");
}
git_remote_free(origin);
return;
}
int sync_with_remote(struct git_info *info)
extern "C" int sync_with_remote(struct git_info *info)
{
int error;
git_remote *origin;
char *proxy_string;
git_config *conf;
if (git_local_only) {
@ -680,14 +672,14 @@ int sync_with_remote(struct git_info *info)
return 0;
}
if (verbose)
report_info("git storage: sync with remote %s[%s]\n", info->url.c_str(), info->branch.c_str());
report_info("git storage: sync with remote %s[%s]\n", info->url, info->branch);
git_storage_update_progress(translate("gettextFromC", "Sync with cloud storage"));
git_repository_config(&conf, info->repo);
std::string proxy_string = getProxyString();
if (info->transport == RT_HTTPS && !proxy_string.empty()) {
if (info->transport == RT_HTTPS && getProxyString(&proxy_string)) {
if (verbose)
report_info("git storage: set proxy to \"%s\"\n", proxy_string.c_str());
git_config_set_string(conf, "http.proxy", proxy_string.c_str());
report_info("git storage: set proxy to \"%s\"\n", proxy_string);
git_config_set_string(conf, "http.proxy", proxy_string);
free(proxy_string);
} else {
if (verbose)
report_info("git storage: delete proxy setting\n");
@ -701,9 +693,9 @@ int sync_with_remote(struct git_info *info)
error = git_remote_lookup(&origin, info->repo, "origin");
if (error) {
const char *msg = giterr_last()->message;
report_info("git storage: repo %s origin lookup failed with: %s", info->url.c_str(), msg);
report_info("git storage: repo %s origin lookup failed with: %s", info->url, msg);
if (!info->is_subsurface_cloud)
report_error("Repository '%s' origin lookup failed (%s)", info->url.c_str(), msg);
report_error("Repository '%s' origin lookup failed (%s)", info->url, msg);
return 0;
}
@ -734,7 +726,7 @@ int sync_with_remote(struct git_info *info)
if (info->is_subsurface_cloud)
report_error("Cannot sync with cloud server, working with offline copy");
else
report_error("Unable to fetch remote '%s'", info->url.c_str());
report_error("Unable to fetch remote '%s'", info->url);
// If we returned GIT_EUSER during authentication, giterr_last() returns NULL
report_info("git storage: remote fetch failed (%s)\n", giterr_last() ? giterr_last()->message : "authentication failed");
// Since we failed to sync with online repository, enter offline mode
@ -756,17 +748,18 @@ static bool update_local_repo(struct git_info *info)
if (!git_repository_head(&head, info->repo)) {
const char *name;
if (!git_branch_name(&name, head)) {
if (info->branch != name) {
std::string branchref = "refs/heads/" + info->branch;
report_info("git storage: setting cache branch from '%s' to '%s'", name, info->branch.c_str());
git_repository_set_head(info->repo, branchref.c_str());
if (strcmp(name, info->branch)) {
char *branchref = format_string("refs/heads/%s", info->branch);
report_info("git storage: setting cache branch from '%s' to '%s'", name, info->branch);
git_repository_set_head(info->repo, branchref);
free(branchref);
}
}
git_reference_free(head);
}
/* make sure we have the correct origin - the cloud server URL could have changed */
if (git_remote_set_url(info->repo, "origin", info->url.c_str())) {
report_info("git storage: failed to update origin to '%s'", info->url.c_str());
if (git_remote_set_url(info->repo, "origin", info->url)) {
report_info("git storage: failed to update origin to '%s'", info->url);
return false;
}
@ -778,6 +771,7 @@ static bool update_local_repo(struct git_info *info)
static int repository_create_cb(git_repository **out, const char *path, int bare, void *)
{
char *proxy_string;
git_config *conf;
int ret = git_repository_init(out, path, bare);
@ -788,11 +782,11 @@ static int repository_create_cb(git_repository **out, const char *path, int bare
}
git_repository_config(&conf, *out);
std::string proxy_string = getProxyString();
if (!proxy_string.empty()) {
if (getProxyString(&proxy_string)) {
if (verbose)
report_info("git storage: set proxy to \"%s\"\n", proxy_string.c_str());
git_config_set_string(conf, "http.proxy", proxy_string.c_str());
report_info("git storage: set proxy to \"%s\"\n", proxy_string);
git_config_set_string(conf, "http.proxy", proxy_string);
free(proxy_string);
} else {
if (verbose)
report_info("git storage: delete proxy setting\n");
@ -806,30 +800,34 @@ static int repository_create_cb(git_repository **out, const char *path, int bare
static bool create_and_push_remote(struct git_info *info)
{
git_config *conf;
char *variable_name, *head;
if (verbose)
report_info("git storage: create and push remote\n");
/* first make sure the directory for the local cache exists */
subsurface_mkdir(info->localdir.c_str());
subsurface_mkdir(info->localdir);
std::string head = "refs/heads/" + info->branch;
head = format_string("refs/heads/%s", info->branch);
/* set up the origin to point to our remote */
git_repository_init_options init_opts = GIT_REPOSITORY_INIT_OPTIONS_INIT;
init_opts.origin_url = info->url.c_str();
init_opts.initial_head = head.c_str();
init_opts.origin_url = info->url;
init_opts.initial_head = head;
/* now initialize the repository with */
git_repository_init_ext(&info->repo, info->localdir.c_str(), &init_opts);
git_repository_init_ext(&info->repo, info->localdir, &init_opts);
/* create a config so we can set the remote tracking branch */
git_repository_config(&conf, info->repo);
std::string variable_name = "branch." + info->branch + ".remote";
git_config_set_string(conf, variable_name.c_str(), "origin");
variable_name = format_string("branch.%s.remote", info->branch);
git_config_set_string(conf, variable_name, "origin");
free(variable_name);
variable_name = "branch." + info->branch + ".merge";
git_config_set_string(conf, variable_name.c_str(), head.c_str());
variable_name = format_string("branch.%s.merge", info->branch);
git_config_set_string(conf, variable_name, head);
free(head);
free(variable_name);
/* finally create an empty commit and push it to the remote */
if (do_git_save(info, false, true))
@ -855,18 +853,18 @@ static bool create_local_repo(struct git_info *info)
opts.repository_cb = repository_create_cb;
opts.fetch_opts.callbacks.certificate_check = certificate_check_cb;
opts.checkout_branch = info->branch.c_str();
opts.checkout_branch = info->branch;
if (info->is_subsurface_cloud && !canReachCloudServer(info)) {
report_info("git storage: cannot reach remote server");
return false;
}
if (verbose > 1)
report_info("git storage: calling git_clone()\n");
error = git_clone(&info->repo, info->url.c_str(), info->localdir.c_str(), &opts);
error = git_clone(&info->repo, info->url, info->localdir, &opts);
if (verbose > 1)
report_info("git storage: returned from git_clone() with return value %d\n", error);
if (error) {
report_info("git storage: clone of %s failed", info->url.c_str());
report_info("git storage: clone of %s failed", info->url);
const char *msg = "";
if (giterr_last()) {
msg = giterr_last()->message;
@ -874,9 +872,9 @@ static bool create_local_repo(struct git_info *info)
} else {
report_info("git storage: giterr_last() is null\n");
}
std::string pattern = format_string_std("reference 'refs/remotes/origin/%s' not found", info->branch.c_str());
char *pattern = format_string("reference 'refs/remotes/origin/%s' not found", info->branch);
// it seems that we sometimes get 'Reference' and sometimes 'reference'
if (includes_string_caseinsensitive(msg, pattern.c_str())) {
if (includes_string_caseinsensitive(msg, pattern)) {
/* we're trying to open the remote branch that corresponds
* to our cloud storage and the branch doesn't exist.
* So we need to create the branch and push it to the remote */
@ -889,18 +887,19 @@ static bool create_local_repo(struct git_info *info)
report_error("%s", translate("gettextFromC", "Error connecting to Subsurface cloud storage"));
#endif
} else {
report_error(translate("gettextFromC", "git clone of %s failed (%s)"), info->url.c_str(), msg);
report_error(translate("gettextFromC", "git clone of %s failed (%s)"), info->url, msg);
}
free(pattern);
}
return !error;
}
static enum remote_transport url_to_remote_transport(const std::string &remote)
static enum remote_transport url_to_remote_transport(const char *remote)
{
/* figure out the remote transport */
if (starts_with(remote, "ssh://"))
if (strncmp(remote, "ssh://", 6) == 0)
return RT_SSH;
else if (starts_with(remote.c_str(), "https://"))
else if (strncmp(remote, "https://", 8) == 0)
return RT_HTTPS;
else
return RT_OTHER;
@ -911,24 +910,24 @@ static bool get_remote_repo(struct git_info *info)
struct stat st;
if (verbose > 1) {
report_info("git storage: accessing %s\n", info->url.c_str());
report_info("git storage: accessing %s\n", info->url);
}
git_storage_update_progress(translate("gettextFromC", "Synchronising data file"));
/* Do we already have a local cache? */
if (!subsurface_stat(info->localdir.c_str(), &st)) {
if (!subsurface_stat(info->localdir, &st)) {
int error;
if (verbose)
report_info("git storage: update local repo\n");
error = git_repository_open(&info->repo, info->localdir.c_str());
error = git_repository_open(&info->repo, info->localdir);
if (error) {
const char *msg = giterr_last()->message;
report_info("git storage: unable to open local cache at %s: %s", info->localdir.c_str(), msg);
report_info("git storage: unable to open local cache at %s: %s", info->localdir, msg);
if (info->is_subsurface_cloud)
(void)cleanup_local_cache(info);
else
report_error("Unable to open git cache repository at %s: %s", info->localdir.c_str(), msg);
report_error("Unable to open git cache repository at %s: %s", info->localdir, msg);
return false;
}
@ -956,17 +955,17 @@ static bool get_remote_repo(struct git_info *info)
* Remove the user name from the url if it exists, and
* save it in 'info->username'.
*/
std::string extract_username(struct git_info *info, const std::string &url)
static void extract_username(struct git_info *info, char *url)
{
char c;
const char *p = url.c_str();
char *p = url;
while ((c = *p++) >= 'a' && c <= 'z')
/* nothing */;
if (c != ':')
return url;
return;
if (*p++ != '/' || *p++ != '/')
return url;
return;
/*
* Ok, we found "[a-z]*://" and we think we have a real
@ -975,38 +974,38 @@ std::string extract_username(struct git_info *info, const std::string &url)
*/
info->transport = url_to_remote_transport(url);
const char *at = strchr(p, '@');
char *at = strchr(p, '@');
if (!at)
return url;
return;
/* was this the @ that denotes an account? that means it was before the
* first '/' after the protocol:// - so let's find a '/' after that and compare */
const char *slash = strchr(p, '/');
char *slash = strchr(p, '/');
if (!slash || at > slash)
return url;
return;
/* grab the part between "protocol://" and "@" as encoded email address
* (that's our username) and move the rest of the URL forward, remembering
* to copy the closing NUL as well */
info->username = std::string(p, at - p);
info->username = strndup(p, at - p);
memmove(p, at + 1, strlen(at + 1) + 1);
/*
* Ugly, ugly. Parsing the remote repo user name also sets
* it in the preferences. We should do this somewhere else!
*/
prefs.cloud_storage_email_encoded = strdup(info->username.c_str());
return url.substr(at + 1 - url.c_str());
prefs.cloud_storage_email_encoded = strdup(info->username);
}
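extract_username() above strips a leading "protocol://user@" account part out of the URL and keeps the user name separately. A stand-alone sketch of the same idea (simplified, with invented names, not the code from this diff):

#include <iostream>
#include <string>

// Illustrative only: split "proto://user@host/path" into the user name and "proto://host/path".
static std::string strip_username(const std::string &url, std::string &username)
{
	auto proto = url.find("://");
	if (proto == std::string::npos)
		return url;				// no protocol prefix, leave untouched
	auto at = url.find('@', proto + 3);
	auto slash = url.find('/', proto + 3);
	if (at == std::string::npos || slash == std::string::npos || at > slash)
		return url;				// the '@' is not part of an account
	username = url.substr(proto + 3, at - proto - 3);
	return url.substr(0, proto + 3) + url.substr(at + 1);
}

int main()
{
	std::string user;
	std::cout << strip_username("https://someone%40example.com@cloud.example.com/git/repo", user)
		  << " (user: " << user << ")\n";	// https://cloud.example.com/git/repo (user: someone%40example.com)
}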
git_info::git_info() : repo(nullptr), is_subsurface_cloud(0), transport(RT_LOCAL)
extern "C" void cleanup_git_info(struct git_info *info)
{
}
git_info::~git_info()
{
if (repo)
git_repository_free(repo);
if (info->repo)
git_repository_free(info->repo);
free((void *)info->url);
free((void *)info->branch);
free((void *)info->username);
free((void *)info->localdir);
memset(info, 0, sizeof(*info));
}
/*
@ -1019,14 +1018,16 @@ git_info::~git_info()
* https://host/repo[branch]
* file://repo[branch]
*/
bool is_git_repository(const char *filename, struct git_info *info)
extern "C" bool is_git_repository(const char *filename, struct git_info *info)
{
int flen, blen;
int offset = 1;
char *url, *branch;
/* we are looking at a new potential remote, but we haven't synced with it */
git_remote_sync_successful = false;
memset(info, 0, sizeof(*info));
info->transport = RT_LOCAL;
flen = strlen(filename);
if (!flen || filename[--flen] != ']')
@ -1069,11 +1070,11 @@ bool is_git_repository(const char *filename, struct git_info *info)
* The actual git reading/writing routines can use this
* to generate proper error messages.
*/
std::string url(filename, flen);
std::string branch(filename + flen + offset, blen);
url = format_string("%.*s", flen, filename);
branch = format_string("%.*s", blen, filename + flen + offset);
/* Extract the username from the url string */
url = extract_username(info, url);
extract_username(info, url);
info->url = url;
info->branch = branch;
@ -1098,10 +1099,10 @@ bool is_git_repository(const char *filename, struct git_info *info)
*/
switch (info->transport) {
case RT_LOCAL:
info->localdir = url;
info->localdir = strdup(url);
break;
default:
info->localdir = get_local_dir(info->url.c_str(), info->branch).c_str();
info->localdir = get_local_dir(info->url, info->branch);
break;
}
@ -1111,19 +1112,19 @@ bool is_git_repository(const char *filename, struct git_info *info)
*
* This is used to create more user friendly error message and warnings.
*/
info->is_subsurface_cloud = (strstr(info->url.c_str(), prefs.cloud_base_url) != NULL);
info->is_subsurface_cloud = (strstr(info->url, prefs.cloud_base_url) != NULL);
return true;
}
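The comment above lists the "url[branch]" spellings that is_git_repository() accepts. A minimal stand-alone sketch of that naming convention (invented helper, not the parser from this diff):

#include <iostream>
#include <string>

// Illustrative only: the branch is the bracketed suffix, everything before it is the repository URL.
static bool split_repo_spec(const std::string &spec, std::string &url, std::string &branch)
{
	if (spec.empty() || spec.back() != ']')
		return false;				// no "[branch]" suffix
	auto open = spec.rfind('[');
	if (open == std::string::npos)
		return false;
	url = spec.substr(0, open);
	branch = spec.substr(open + 1, spec.size() - open - 2);
	return true;
}

int main()
{
	std::string url, branch;
	if (split_repo_spec("https://cloud.example.com/git/repo[master]", url, branch))
		std::cout << "url='" << url << "' branch='" << branch << "'\n";
}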
bool open_git_repository(struct git_info *info)
extern "C" bool open_git_repository(struct git_info *info)
{
/*
* If the repository is local, just open it. There's nothing
* else to do.
*/
if (info->transport == RT_LOCAL) {
const char *url = info->localdir.c_str();
const char *url = info->localdir;
if (git_repository_open(&info->repo, url)) {
if (verbose)
@ -1144,13 +1145,15 @@ bool open_git_repository(struct git_info *info)
return get_remote_repo(info);
}
int git_create_local_repo(const std::string &filename)
extern "C" int git_create_local_repo(const char *filename)
{
git_repository *repo;
auto idx = filename.find('[');
std::string path = filename.substr(0, idx);
int ret = git_repository_init(&repo, path.c_str(), false);
char *path = strdup(filename);
char *branch = strchr(path, '[');
if (branch)
*branch = '\0';
int ret = git_repository_init(&repo, path, false);
free(path);
if (ret != 0)
(void)report_error("Create local repo failed with error code %d", ret);
git_repository_free(repo);


@ -22,38 +22,20 @@ extern "C" {
enum remote_transport { RT_LOCAL, RT_HTTPS, RT_SSH, RT_OTHER };
extern bool git_local_only;
extern bool git_remote_sync_successful;
extern void clear_git_id(void);
extern void set_git_id(const struct git_oid *);
void set_git_update_cb(int(*)(const char *));
int git_storage_update_progress(const char *text);
int get_authorship(git_repository *repo, git_signature **authorp);
#ifdef __cplusplus
}
#include <string>
struct git_oid;
struct git_repository;
struct divelog;
struct git_info {
std::string url;
std::string branch;
std::string username;
std::string localdir;
const char *url;
const char *branch;
const char *username;
const char *localdir;
struct git_repository *repo;
unsigned is_subsurface_cloud:1;
enum remote_transport transport;
git_info();
~git_info();
};
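The C++ variant of struct git_info above releases its libgit2 handle in the destructor instead of requiring an explicit cleanup_git_info() call. A generic sketch of that RAII pattern (fake types, not Subsurface or libgit2 code):

#include <cstdio>

struct fake_repo { const char *name; };
static fake_repo *fake_open(const char *name) { std::puts("open"); return new fake_repo{name}; }
static void fake_free(fake_repo *r) { std::puts("free"); delete r; }

struct repo_info {
	fake_repo *repo = nullptr;
	~repo_info() { if (repo) fake_free(repo); }	// mirrors git_info::~git_info()
};

int main()
{
	repo_info info;
	info.repo = fake_open("local-cache");
	// no explicit cleanup call: the handle is released when info goes out of scope
}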
extern std::string saved_git_id;
extern std::string get_sha(git_repository *repo, const std::string &branch);
extern std::string get_local_dir(const std::string &, const std::string &);
extern bool is_git_repository(const char *filename, struct git_info *info);
extern bool open_git_repository(struct git_info *info);
extern bool remote_repo_uptodate(const char *filename, struct git_info *info);
@ -61,7 +43,23 @@ extern int sync_with_remote(struct git_info *);
extern int git_save_dives(struct git_info *, bool select_only);
extern int git_load_dives(struct git_info *, struct divelog *log);
extern int do_git_save(struct git_info *, bool select_only, bool create_empty);
extern int git_create_local_repo(const std::string &filename);
extern void cleanup_git_info(struct git_info *);
extern bool git_local_only;
extern bool git_remote_sync_successful;
extern void clear_git_id(void);
extern void set_git_id(const struct git_oid *);
void set_git_update_cb(int(*)(const char *));
int git_storage_update_progress(const char *text);
char *get_local_dir(const char *, const char *);
int git_create_local_repo(const char *filename);
int get_authorship(git_repository *repo, git_signature **authorp);
#ifdef __cplusplus
}
#include <string>
extern std::string saved_git_id;
extern std::string get_sha(git_repository *repo, const char *branch);
#endif
#endif // GITACCESS_H


@ -2,7 +2,6 @@
#include "dive.h"
#include "metrics.h"
#include "divelist.h"
#include "errorhelper.h"
#include "qthelper.h"
#include "imagedownloader.h"
#include "videoframeextractor.h"
@ -57,7 +56,7 @@ void ImageDownloader::saveImage(QNetworkReply *reply)
hash.addData(filename.toUtf8());
QFile imageFile(path.append("/").append(hash.result().toHex()));
if (imageFile.open(QIODevice::WriteOnly)) {
report_info("Write image to %s", qPrintable(imageFile.fileName()));
qDebug() << "Write image to" << imageFile.fileName();
QDataStream stream(&imageFile);
stream.writeRawData(imageData.data(), imageData.length());
imageFile.waitForBytesWritten(-1);


@ -8,7 +8,6 @@
#include "ssrf.h"
#include "dive.h"
#include "divesite.h"
#include "errorhelper.h"
#include "gas.h"
#include "parse.h"
#include "sample.h"
@ -148,35 +147,35 @@ static int cobalt_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_cylinder_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_buffer, &cobalt_cylinders, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query cobalt_cylinders failed.");
fprintf(stderr, "%s", "Database query cobalt_cylinders failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_buddy_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_buffer, &cobalt_buddies, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query cobalt_buddies failed.");
fprintf(stderr, "%s", "Database query cobalt_buddies failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_visibility_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_buffer, &cobalt_visibility, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query cobalt_visibility failed.");
fprintf(stderr, "%s", "Database query cobalt_visibility failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_location_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_buffer, &cobalt_location, &location, NULL);
if (retval != SQLITE_OK) {
report_info("Database query cobalt_location failed.");
fprintf(stderr, "%s", "Database query cobalt_location failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_site_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_buffer, &cobalt_location, &location_site, NULL);
if (retval != SQLITE_OK) {
report_info("Database query cobalt_location (site) failed.");
fprintf(stderr, "%s", "Database query cobalt_location (site) failed.\n");
return 1;
}
@ -197,7 +196,7 @@ static int cobalt_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_profile_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_buffer, &cobalt_profile_sample, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query cobalt_profile_sample failed.");
fprintf(stderr, "%s", "Database query cobalt_profile_sample failed.\n");
return 1;
}
@ -220,7 +219,7 @@ extern "C" int parse_cobalt_buffer(sqlite3 *handle, const char *url, const char
retval = sqlite3_exec(handle, get_dives, &cobalt_dive, &state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query failed '%s'.\n", url);
fprintf(stderr, "Database query failed '%s'.\n", url);
return 1;
}


@ -10,7 +10,6 @@
#include "divelist.h"
#include "divelog.h"
#include "file.h"
#include "format.h"
#include "parse.h"
#include "sample.h"
#include "divelist.h"
@ -101,7 +100,7 @@ static char *parse_dan_new_line(char *buf, const char *NL)
if (iter) {
iter += strlen(NL);
} else {
report_info("DEBUG: No new line found");
fprintf(stderr, "DEBUG: No new line found\n");
return NULL;
}
return iter;
@ -129,7 +128,7 @@ static int parse_dan_format(const char *filename, struct xml_params *params, str
} else if ((ptr = strstr(mem.data(), "\n")) != NULL) {
NL = "\n";
} else {
report_info("DEBUG: failed to detect NL");
fprintf(stderr, "DEBUG: failed to detect NL\n");
return -1;
}
@ -145,7 +144,7 @@ static int parse_dan_format(const char *filename, struct xml_params *params, str
xml_params_add(params, "diveNro", tmpbuf);
}
//report_info("DEBUG: BEGIN end_ptr %d round %d <%s>", end_ptr, j++, ptr);
//fprintf(stderr, "DEBUG: BEGIN end_ptr %d round %d <%s>\n", end_ptr, j++, ptr);
iter = ptr + 1;
for (i = 0; i <= 4 && iter; ++i) {
iter = strchr(iter, '|');
@ -154,7 +153,7 @@ static int parse_dan_format(const char *filename, struct xml_params *params, str
}
if (!iter) {
report_info("DEBUG: Data corrupt");
fprintf(stderr, "DEBUG: Data corrupt");
return -1;
}
@ -215,7 +214,7 @@ static int parse_dan_format(const char *filename, struct xml_params *params, str
/* After ZDH we should get either ZDT (above) or ZDP */
if (strncmp(iter, "ZDP{", 4) != 0) {
report_info("DEBUG: Input appears to violate DL7 specification");
fprintf(stderr, "DEBUG: Input appears to violate DL7 specification\n");
end_ptr = iter - mem.data();
continue;
}
@ -237,7 +236,7 @@ static int parse_dan_format(const char *filename, struct xml_params *params, str
if (ptr) {
*ptr = 0;
} else {
report_info("DEBUG: failed to find end ZDP");
fprintf(stderr, "DEBUG: failed to find end ZDP\n");
return -1;
}
mem_csv.resize(ptr - mem_csv.data());
@ -314,11 +313,10 @@ extern "C" int parse_csv_file(const char *filename, struct xml_params *params, c
#ifndef SUBSURFACE_MOBILE
if (verbose >= 2) {
std::string info = format_string_std("(echo '<csv>'; cat %s;echo '</csv>') | xsltproc ", filename);
fprintf(stderr, "(echo '<csv>'; cat %s;echo '</csv>') | xsltproc ", filename);
for (int i = 0; i < xml_params_count(params); i++)
info += format_string_std("--stringparam %s %s ", xml_params_get_key(params, i), xml_params_get_value(params, i));
info += format_string_std("%s/xslt/%s -", SUBSURFACE_SOURCE, csvtemplate);
report_info("%s", info.c_str());
fprintf(stderr, "--stringparam %s %s ", xml_params_get_key(params, i), xml_params_get_value(params, i));
fprintf(stderr, "%s/xslt/%s -\n", SUBSURFACE_SOURCE, csvtemplate);
}
#endif
ret = parse_xml_buffer(filename, mem.data(), mem.size(), log, params);
@ -387,11 +385,8 @@ static int try_to_xslt_open_csv(const char *filename, std::string &mem, const ch
memcpy(ptr_out, tag, tag_name_size);
*--ptr_out = '<';
// On Windows, ptrdiff_t is long long int, on Linux it is long int.
// Windows doesn't support the ptrdiff_t format specifier "%td", so
// let's cast to long int.
if (ptr_out != mem.data())
report_info("try_to_xslt_open_csv(): ptr_out off by %ld. This shouldn't happen", static_cast<long int>(ptr_out - mem.data()));
fprintf(stderr, "try_to_xslt_open_csv(): ptr_out off by %ld. This shouldn't happen\n", ptr_out - mem.data());
return 0;
}
@ -724,7 +719,7 @@ int parse_txt_file(const char *filename, const char *csv, struct divelog *log)
case EOF:
break;
default:
report_info("Unable to parse input: %s\n", lineptr);
printf("Unable to parse input: %s\n", lineptr);
break;
}
@ -891,11 +886,10 @@ static int parse_seabear_csv_file(const char *filename, struct xml_params *param
*/
if (verbose >= 2) {
std::string info = "xsltproc ";
fprintf(stderr, "xsltproc ");
for (i = 0; i < xml_params_count(params); i++)
info += format_string_std("--stringparam %s %s ", xml_params_get_key(params, i), xml_params_get_value(params, i));
info += "xslt/csv2xml.xslt";
report_info("%s", info.c_str());
fprintf(stderr, "--stringparam %s %s ", xml_params_get_key(params, i), xml_params_get_value(params, i));
fprintf(stderr, "xslt/csv2xml.xslt\n");
}
ret = parse_xml_buffer(filename, mem.data(), mem.size(), log, params);
@ -932,11 +926,10 @@ int parse_manual_file(const char *filename, struct xml_params *params, struct di
#ifndef SUBSURFACE_MOBILE
if (verbose >= 2) {
std::string info = format_string_std("(echo '<manualCSV>'; cat %s;echo '</manualCSV>') | xsltproc ", filename);
fprintf(stderr, "(echo '<manualCSV>'; cat %s;echo '</manualCSV>') | xsltproc ", filename);
for (int i = 0; i < xml_params_count(params); i++)
info += format_string_std("--stringparam %s %s ", xml_params_get_key(params, i), xml_params_get_value(params, i));
info += format_string_std("%s/xslt/manualcsv2xml.xslt -", SUBSURFACE_SOURCE);
report_info("%s", info.c_str());
fprintf(stderr, "--stringparam %s %s ", xml_params_get_key(params, i), xml_params_get_value(params, i));
fprintf(stderr, "%s/xslt/manualcsv2xml.xslt -\n", SUBSURFACE_SOURCE);
}
#endif
ret = parse_xml_buffer(filename, mem.data(), mem.size(), log, params);


@ -13,7 +13,6 @@
#include "divelist.h"
#include "divelog.h"
#include "device.h"
#include "errorhelper.h"
#include "membuffer.h"
#include "gettext.h"
@ -337,14 +336,14 @@ static int divinglog_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_cylinder0_template, diveid);
retval = sqlite3_exec(handle, get_buffer, &divinglog_cylinder, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query divinglog_cylinder0 failed.");
fprintf(stderr, "%s", "Database query divinglog_cylinder0 failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_cylinder_template, diveid);
retval = sqlite3_exec(handle, get_buffer, &divinglog_cylinder, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query divinglog_cylinder failed.");
fprintf(stderr, "%s", "Database query divinglog_cylinder failed.\n");
return 1;
}
@ -374,7 +373,7 @@ static int divinglog_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_profile_template, diveid);
retval = sqlite3_exec(handle, get_buffer, &divinglog_profile, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query divinglog_profile failed.");
fprintf(stderr, "%s", "Database query divinglog_profile failed.\n");
return 1;
}
@ -397,7 +396,7 @@ extern "C" int parse_divinglog_buffer(sqlite3 *handle, const char *url, const ch
retval = sqlite3_exec(handle, get_dives, &divinglog_dive, &state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query failed '%s'.", url);
fprintf(stderr, "Database query failed '%s'.\n", url);
return 1;
}


@ -143,7 +143,7 @@ static int seac_dive(void *param, int, char **data, char **)
break;
default:
if (verbose) {
report_info("Unknown divetype %i", atoi(data[6]));
fprintf(stderr, "Unknown divetype %i", atoi(data[6]));
}
}
}
@ -174,7 +174,7 @@ static int seac_dive(void *param, int, char **data, char **)
break;
default:
if (verbose) {
report_info("Unknown salinity %i", atoi(data[8]));
fprintf(stderr, "Unknown salinity %i", atoi(data[8]));
}
}
}
@ -187,7 +187,7 @@ static int seac_dive(void *param, int, char **data, char **)
// Create sql_stmt type to query DB
retval = sqlite3_prepare_v2(handle, get_samples, -1, &sqlstmt, 0);
if (retval != SQLITE_OK) {
report_info("Preparing SQL object failed when getting SeacSync dives.");
fprintf(stderr, "%s", "Preparing SQL object failed when getting SeacSync dives.\n");
return 1;
}
@ -198,7 +198,7 @@ static int seac_dive(void *param, int, char **data, char **)
// Catch a bad query
retval = sqlite3_step(sqlstmt);
if (retval == SQLITE_ERROR) {
report_info("Getting dive data from SeacSync DB failed.");
fprintf(stderr, "%s", "Getting dive data from SeacSync DB failed.\n");
return 1;
}
@ -293,7 +293,7 @@ extern "C" int parse_seac_buffer(sqlite3 *handle, const char *url, const char *,
retval = sqlite3_exec(handle, get_dives, &seac_dive, &state, &err);
if (retval != SQLITE_OK) {
report_info("Database query failed '%s'.", url);
fprintf(stderr, "Database query failed '%s'.\n", url);
return 1;
}


@ -12,7 +12,6 @@
#include "divelist.h"
#include "divelog.h"
#include "device.h"
#include "errorhelper.h"
#include "membuffer.h"
#include "gettext.h"
@ -301,7 +300,7 @@ static int shearwater_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_mode_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_mode, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_mode failed.");
fprintf(stderr, "%s", "Database query shearwater_mode failed.\n");
return 1;
}
}
@ -309,14 +308,14 @@ static int shearwater_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_cylinder_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_cylinders, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_cylinders failed.");
fprintf(stderr, "%s", "Database query shearwater_cylinders failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_changes_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_changes, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_changes failed.");
fprintf(stderr, "%s", "Database query shearwater_changes failed.\n");
return 1;
}
@ -326,7 +325,7 @@ static int shearwater_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_profile_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_profile_sample, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_profile_sample failed.");
fprintf(stderr, "%s", "Database query shearwater_profile_sample failed.\n");
return 1;
}
}
@ -431,7 +430,7 @@ static int shearwater_cloud_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_mode_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_mode, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_mode failed.");
fprintf(stderr, "%s", "Database query shearwater_mode failed.\n");
return 1;
}
}
@ -439,21 +438,21 @@ static int shearwater_cloud_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_cylinder_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_cylinders, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_cylinders failed.");
fprintf(stderr, "%s", "Database query shearwater_cylinders failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_first_gas_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_changes, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_changes failed.");
fprintf(stderr, "%s", "Database query shearwater_changes failed.\n");
return 1;
}
snprintf(get_buffer, sizeof(get_buffer) - 1, get_changes_template, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_changes, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_changes failed.");
fprintf(stderr, "%s", "Database query shearwater_changes failed.\n");
return 1;
}
@ -463,7 +462,7 @@ static int shearwater_cloud_dive(void *param, int, char **data, char **)
snprintf(get_buffer, sizeof(get_buffer) - 1, get_profile_template, dive_id, dive_id);
retval = sqlite3_exec(handle, get_buffer, &shearwater_profile_sample, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query shearwater_profile_sample failed.");
fprintf(stderr, "%s", "Database query shearwater_profile_sample failed.\n");
return 1;
}
}
@ -489,7 +488,7 @@ extern "C" int parse_shearwater_buffer(sqlite3 *handle, const char *url, const c
retval = sqlite3_exec(handle, get_dives, &shearwater_dive, &state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query failed '%s'.", url);
fprintf(stderr, "Database query failed '%s'.\n", url);
return 1;
}
@ -509,7 +508,7 @@ extern "C" int parse_shearwater_cloud_buffer(sqlite3 *handle, const char *url, c
retval = sqlite3_exec(handle, get_dives, &shearwater_cloud_dive, &state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query failed '%s'.", url);
fprintf(stderr, "Database query failed '%s'.\n", url);
return 1;
}


@ -12,7 +12,6 @@
#include "divelist.h"
#include "divelog.h"
#include "device.h"
#include "errorhelper.h"
#include "membuffer.h"
#include "gettext.h"
#include "tag.h"
@ -261,19 +260,30 @@ static int dm4_dive(void *param, int, char **data, char **)
snprintf(get_events, sizeof(get_events) - 1, get_events_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_events, &dm4_events, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query dm4_events failed.");
fprintf(stderr, "%s", "Database query dm4_events failed.\n");
return 1;
}
snprintf(get_events, sizeof(get_events) - 1, get_tags_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_events, &dm4_tags, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query dm4_tags failed.");
fprintf(stderr, "%s", "Database query dm4_tags failed.\n");
return 1;
}
dive_end(state);
/*
for (i=0; i<columns;++i) {
fprintf(stderr, "%s\t", column[i]);
}
fprintf(stderr, "\n");
for (i=0; i<columns;++i) {
fprintf(stderr, "%s\t", data[i]);
}
fprintf(stderr, "\n");
//exit(0);
*/
return SQLITE_OK;
}
@ -293,7 +303,7 @@ extern "C" int parse_dm4_buffer(sqlite3 *handle, const char *url, const char *,
retval = sqlite3_exec(handle, get_dives, &dm4_dive, &state, &err);
if (retval != SQLITE_OK) {
report_info("Database query failed '%s'.", url);
fprintf(stderr, "Database query failed '%s'.\n", url);
return 1;
}
@ -420,7 +430,7 @@ static int dm5_dive(void *param, int, char **data, char **)
snprintf(get_events, sizeof(get_events) - 1, get_cylinders_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_events, &dm5_cylinders, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query dm5_cylinders failed.");
fprintf(stderr, "%s", "Database query dm5_cylinders failed.\n");
return 1;
}
@ -526,21 +536,21 @@ static int dm5_dive(void *param, int, char **data, char **)
snprintf(get_events, sizeof(get_events) - 1, get_gaschange_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_events, &dm5_gaschange, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query dm5_gaschange failed.");
fprintf(stderr, "%s", "Database query dm5_gaschange failed.\n");
return 1;
}
snprintf(get_events, sizeof(get_events) - 1, get_events_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_events, &dm4_events, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query dm4_events failed.");
fprintf(stderr, "%s", "Database query dm4_events failed.\n");
return 1;
}
snprintf(get_events, sizeof(get_events) - 1, get_tags_template, state->cur_dive->number);
retval = sqlite3_exec(handle, get_events, &dm4_tags, state, NULL);
if (retval != SQLITE_OK) {
report_info("Database query dm4_tags failed.");
fprintf(stderr, "%s", "Database query dm4_tags failed.\n");
return 1;
}
@ -565,7 +575,7 @@ extern "C" int parse_dm5_buffer(sqlite3 *handle, const char *url, const char *,
retval = sqlite3_exec(handle, get_dives, &dm5_dive, &state, &err);
if (retval != SQLITE_OK) {
report_info("Database query failed '%s'.", url);
fprintf(stderr, "Database query failed '%s'.\n", url);
return 1;
}


@ -19,7 +19,6 @@
#include "sample.h"
#include "subsurface-float.h"
#include "subsurface-string.h"
#include "format.h"
#include "device.h"
#include "dive.h"
#include "errorhelper.h"
@ -51,8 +50,15 @@ static int stoptime, stopdepth, ndl, po2, cns, heartbeat, bearing;
static bool in_deco, first_temp_is_air;
static int current_gas_index;
#define INFO(fmt, ...) report_info("INFO: " fmt, ##__VA_ARGS__)
#define ERROR(fmt, ...) report_info("ERROR: " fmt, ##__VA_ARGS__)
/* logging bits from libdivecomputer */
#ifndef __ANDROID__
#define INFO(context, fmt, ...) fprintf(stderr, "INFO: " fmt "\n", ##__VA_ARGS__)
#define ERROR(context, fmt, ...) fprintf(stderr, "ERROR: " fmt "\n", ##__VA_ARGS__)
#else
#include <android/log.h>
#define INFO(context, fmt, ...) __android_log_print(ANDROID_LOG_DEBUG, __FILE__, "INFO: " fmt "\n", ##__VA_ARGS__)
#define ERROR(context, fmt, ...) __android_log_print(ANDROID_LOG_DEBUG, __FILE__, "ERROR: " fmt "\n", ##__VA_ARGS__)
#endif
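Both versions of the INFO/ERROR macros above rely on the ", ##__VA_ARGS__" GNU/clang extension, which drops the trailing comma when the macro is invoked with a format string but no further arguments. A stand-alone sketch of the pattern (invented macro name):

#include <cstdio>

#define MY_INFO(fmt, ...) std::fprintf(stderr, "INFO: " fmt "\n", ##__VA_ARGS__)

int main()
{
	MY_INFO("device opened");			// no extra arguments, the comma is swallowed
	MY_INFO("read %d bytes from %s", 512, "ttyUSB0");
}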
/*
* Directly taken from libdivecomputer's examples/common.c to improve
@ -136,7 +142,7 @@ static dc_status_t parse_gasmixes(device_data_t *devdata, struct dive *dive, dc_
{
static bool shown_warning = false;
unsigned int i;
dc_status_t rc;
int rc;
unsigned int ntanks = 0;
rc = dc_parser_get_field(parser, DC_FIELD_TANK_COUNT, 0, &ntanks);
@ -156,7 +162,7 @@ static dc_status_t parse_gasmixes(device_data_t *devdata, struct dive *dive, dc_
}
clear_cylinder_table(&dive->cylinders);
for (i = 0; i < std::max(ngases, ntanks); i++) {
for (i = 0; i < MAX(ngases, ntanks); i++) {
cylinder_t cyl = empty_cylinder;
cyl.cylinder_use = NOT_USED;
@ -441,7 +447,7 @@ sample_cb(dc_sample_type_t type, const dc_sample_value_t *pvalue, void *userdata
break;
#ifdef DEBUG_DC_VENDOR
case DC_SAMPLE_VENDOR:
printf(" <vendor time='%u:%02u' type=\"%u\" size=\"%u\">", FRACTION_TUPLE(sample->time.seconds, 60),
printf(" <vendor time='%u:%02u' type=\"%u\" size=\"%u\">", FRACTION(sample->time.seconds, 60),
value.vendor.type, value.vendor.size);
for (int i = 0; i < value.vendor.size; ++i)
printf("%02X", ((unsigned char *)value.vendor.data)[i]);
@ -497,7 +503,7 @@ static void dev_info(device_data_t *, const char *fmt, ...)
va_end(ap);
progress_bar_text = buffer;
if (verbose)
INFO("dev_info: %s", buffer);
INFO(0, "dev_info: %s\n", buffer);
if (progress_callback)
(*progress_callback)(buffer);
@ -516,7 +522,7 @@ static void download_error(const char *fmt, ...)
report_error("Dive %d: %s", import_dive_number, buffer);
}
static dc_status_t parse_samples(device_data_t *, struct divecomputer *dc, dc_parser_t *parser)
static int parse_samples(device_data_t *, struct divecomputer *dc, dc_parser_t *parser)
{
// Parse the sample data.
return dc_parser_samples_foreach(parser, sample_cb, dc);
@ -815,7 +821,7 @@ static int dive_cb(const unsigned char *data, unsigned int size,
const unsigned char *fingerprint, unsigned int fsize,
void *userdata)
{
dc_status_t rc;
int rc;
dc_parser_t *parser = NULL;
device_data_t *devdata = (device_data_t *)userdata;
struct dive *dive = NULL;
@ -830,7 +836,7 @@ static int dive_cb(const unsigned char *data, unsigned int size,
rc = dc_parser_new(&parser, devdata->device, data, size);
if (rc != DC_STATUS_SUCCESS) {
download_error(translate("gettextFromC", "Unable to create parser for %s %s: %d"), devdata->vendor, devdata->product, errmsg(rc));
download_error(translate("gettextFromC", "Unable to create parser for %s %s"), devdata->vendor, devdata->product);
return true;
}
@ -843,14 +849,14 @@ static int dive_cb(const unsigned char *data, unsigned int size,
// Parse the dive's header data
rc = libdc_header_parser (parser, devdata, dive);
if (rc != DC_STATUS_SUCCESS) {
download_error(translate("getextFromC", "Error parsing the header: %s"), errmsg(rc));
download_error(translate("getextFromC", "Error parsing the header"));
goto error_exit;
}
// Initialize the sample data.
rc = parse_samples(devdata, &dive->dc, parser);
if (rc != DC_STATUS_SUCCESS) {
download_error(translate("gettextFromC", "Error parsing the samples: %s"), errmsg(rc));
download_error(translate("gettextFromC", "Error parsing the samples"));
goto error_exit;
}
@ -1092,14 +1098,14 @@ static void event_cb(dc_device_t *device, dc_event_type_t event, const void *dat
if (dc_descriptor_get_model(devdata->descriptor) != devinfo->model) {
dc_descriptor_t *better_descriptor = get_descriptor(dc_descriptor_get_type(devdata->descriptor), devinfo->model);
if (better_descriptor != NULL) {
report_info("EVENT_DEVINFO gave us a different detected product (model %d instead of %d), which we are using now.",
fprintf(stderr, "EVENT_DEVINFO gave us a different detected product (model %d instead of %d), which we are using now.\n",
devinfo->model, dc_descriptor_get_model(devdata->descriptor));
devdata->descriptor = better_descriptor;
devdata->product = dc_descriptor_get_product(better_descriptor);
devdata->vendor = dc_descriptor_get_vendor(better_descriptor);
devdata->model = str_printf("%s %s", devdata->vendor, devdata->product);
} else {
report_info("EVENT_DEVINFO gave us a different detected product (model %d instead of %d), but that one is unknown.",
fprintf(stderr, "EVENT_DEVINFO gave us a different detected product (model %d instead of %d), but that one is unknown.\n",
devinfo->model, dc_descriptor_get_model(devdata->descriptor));
}
}
@ -1154,19 +1160,13 @@ static const char *do_device_import(device_data_t *data)
// Register the event handler.
int events = DC_EVENT_WAITING | DC_EVENT_PROGRESS | DC_EVENT_DEVINFO | DC_EVENT_CLOCK | DC_EVENT_VENDOR;
rc = dc_device_set_events(device, events, event_cb, data);
if (rc != DC_STATUS_SUCCESS) {
dev_info(data, "Import error: %s", errmsg(rc));
if (rc != DC_STATUS_SUCCESS)
return translate("gettextFromC", "Error registering the event handler.");
}
// Register the cancellation handler.
rc = dc_device_set_cancel(device, cancel_cb, data);
if (rc != DC_STATUS_SUCCESS) {
dev_info(data, "Import error: %s", errmsg(rc));
if (rc != DC_STATUS_SUCCESS)
return translate("gettextFromC", "Error registering the cancellation handler.");
}
if (data->libdc_dump) {
dc_buffer_t *buffer = dc_buffer_new(0);
@ -1188,8 +1188,6 @@ static const char *do_device_import(device_data_t *data)
if (rc == DC_STATUS_UNSUPPORTED)
return translate("gettextFromC", "Dumping not supported on this device");
dev_info(data, "Import error: %s", errmsg(rc));
return translate("gettextFromC", "Dive data dumping error");
}
} else {
@ -1198,8 +1196,6 @@ static const char *do_device_import(device_data_t *data)
if (rc != DC_STATUS_SUCCESS) {
progress_bar_fraction = 0.0;
dev_info(data, "Import error: %s", errmsg(rc));
return translate("gettextFromC", "Dive data import error");
}
}
@ -1291,7 +1287,7 @@ static dc_status_t usbhid_device_open(dc_iostream_t **iostream, dc_context_t *co
dc_iterator_free (iterator);
if (!device) {
ERROR("didn't find HID device");
ERROR(context, "didn't find HID device\n");
return DC_STATUS_NODEVICE;
}
dev_info(data, "Opening USB HID device for %04x:%04x",
@ -1366,7 +1362,7 @@ static dc_status_t bluetooth_device_open(dc_context_t *context, device_data_t *d
dc_iterator_free (iterator);
if (!address) {
dev_info(data, "No rfcomm device found");
report_error("No rfcomm device found");
return DC_STATUS_NODEVICE;
}
@ -1386,7 +1382,7 @@ dc_status_t divecomputer_device_open(device_data_t *data)
transports &= supported;
if (!transports) {
dev_info(data, "Dive computer transport not supported");
report_error("Dive computer transport not supported");
return DC_STATUS_UNSUPPORTED;
}
@ -1503,19 +1499,18 @@ const char *do_libdivecomputer_import(device_data_t *data)
rc = divecomputer_device_open(data);
if (rc != DC_STATUS_SUCCESS) {
dev_info(data, "Import error: %s", errmsg(rc));
report_error("%s", errmsg(rc));
} else {
dev_info(data, "Connecting ...");
rc = dc_device_open(&data->device, data->context, data->descriptor, data->iostream);
if (rc != DC_STATUS_SUCCESS) {
INFO("dc_device_open error value of %d", rc);
if (subsurface_access(data->devname, R_OK | W_OK) != 0)
INFO(0, "dc_device_open error value of %d", rc);
if (rc != DC_STATUS_SUCCESS && subsurface_access(data->devname, R_OK | W_OK) != 0)
#if defined(SUBSURFACE_MOBILE)
err = translate("gettextFromC", "Error opening the device %s %s (%s).\nIn most cases, in order to debug this issue, it is useful to send the developers the log files. You can copy them to the clipboard in the About dialog.");
#else
err = translate("gettextFromC", "Error opening the device %s %s (%s).\nIn most cases, in order to debug this issue, a libdivecomputer logfile will be useful.\nYou can create this logfile by selecting the corresponding checkbox in the download dialog.");
#endif
} else {
if (rc == DC_STATUS_SUCCESS) {
dev_info(data, "Starting import ...");
err = do_device_import(data);
/* TODO: Show the logfile to the user on error. */
@ -1616,12 +1611,12 @@ dc_status_t libdc_buffer_parser(struct dive *dive, device_data_t *data, unsigned
if (dc_descriptor_get_type(data->descriptor) != DC_FAMILY_UWATEC_ALADIN && dc_descriptor_get_type(data->descriptor) != DC_FAMILY_UWATEC_MEMOMOUSE) {
rc = libdc_header_parser (parser, data, dive);
if (rc != DC_STATUS_SUCCESS) {
report_error("Error parsing the dive header data. Dive # %d: %s", dive->number, errmsg(rc));
report_error("Error parsing the dive header data. Dive # %d\nStatus = %s", dive->number, errmsg(rc));
}
}
rc = dc_parser_samples_foreach (parser, sample_cb, &dive->dc);
if (rc != DC_STATUS_SUCCESS) {
report_error("Error parsing the sample data. Dive # %d: %s", dive->number, errmsg(rc));
report_error("Error parsing the sample data. Dive # %d\nStatus = %s", dive->number, errmsg(rc));
dc_parser_destroy (parser);
return rc;
}
@ -1642,7 +1637,7 @@ dc_descriptor_t *get_descriptor(dc_family_t type, unsigned int model)
rc = dc_descriptor_iterator(&iterator);
if (rc != DC_STATUS_SUCCESS) {
report_info("Error creating the device descriptor iterator: %s", errmsg(rc));
fprintf(stderr, "Error creating the device descriptor iterator.\n");
return NULL;
}
while ((dc_iterator_next(iterator, &descriptor)) == DC_STATUS_SUCCESS) {


@ -6,10 +6,9 @@
#include "divesite.h"
#include "dive.h"
#include "divelog.h"
#include "errorhelper.h"
#include "subsurface-string.h"
#include "file.h"
#include "sample.h"
#include "strndup.h"
// Convert bytes into an INT
#define array_uint16_le(p) ((unsigned int) (p)[0] \
@ -175,23 +174,28 @@ static void parse_dives(int log_version, const unsigned char *buf, unsigned int
// Dive location, assemble Location and Place
unsigned int len, place_len;
std::string location;
char *location;
len = array_uint32_le(buf + ptr);
ptr += 4;
place_len = array_uint32_le(buf + ptr + len);
if (len && place_len) {
location = std::string((char *)buf + ptr, len) + ", " +
std::string((char *)buf + ptr + len + 4, place_len);
location = (char *)malloc(len + place_len + 4);
memset(location, 0, len + place_len + 4);
memcpy(location, buf + ptr, len);
memcpy(location + len, ", ", 2);
memcpy(location + len + 2, buf + ptr + len + 4, place_len);
} else if (len) {
location = std::string((char *)buf + ptr, len);
location = strndup((char *)buf + ptr, len);
} else if (place_len) {
location = std::string((char *)buf + ptr + len + 4, place_len);
location = strndup((char *)buf + ptr + len + 4, place_len);
}
/* Store the location only if we have one */
if (!location.empty())
add_dive_to_dive_site(dive, find_or_create_dive_site_with_name(location.c_str(), sites));
if (len || place_len) {
add_dive_to_dive_site(dive, find_or_create_dive_site_with_name(location, sites));
free(location);
}
ptr += len + 4 + place_len;
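The location handling above reads two length-prefixed strings (a 32-bit little-endian length followed by the text) and joins them as "Location, Place". A self-contained sketch of that layout with an invented buffer:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

static uint32_t u32_le(const unsigned char *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main()
{
	// "Red Sea" (7 bytes), then "Ras Mohammed" (12 bytes)
	std::vector<unsigned char> buf = { 7, 0, 0, 0, 'R','e','d',' ','S','e','a',
					   12, 0, 0, 0, 'R','a','s',' ','M','o','h','a','m','m','e','d' };
	const unsigned char *p = buf.data();
	uint32_t len = u32_le(p);			p += 4;
	std::string location((const char *)p, len);	p += len;
	uint32_t place_len = u32_le(p);			p += 4;
	std::string place((const char *)p, place_len);
	std::cout << location + ", " + place << '\n';	// Red Sea, Ras Mohammed
}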
@ -200,9 +204,9 @@ static void parse_dives(int log_version, const unsigned char *buf, unsigned int
ptr += 4;
// Blank notes are better than the default text
std::string notes((char *)buf + ptr, len);
if (!starts_with(notes, "Comment ..."))
dive->notes = strdup(notes.c_str());
if (len && strncmp((char *)buf + ptr, "Comment ...", 11)) {
dive->notes = strndup((char *)buf + ptr, len);
}
ptr += len;
dive->id = array_uint32_le(buf + ptr);
@ -275,11 +279,11 @@ static void parse_dives(int log_version, const unsigned char *buf, unsigned int
}
if (sample_count == 0) {
report_info("DEBUG: sample count 0 - terminating parser");
fprintf(stderr, "DEBUG: sample count 0 - terminating parser\n");
break;
}
if (ptr + sample_count * 4 + 4 > buf_size) {
report_info("DEBUG: BOF - terminating parser");
fprintf(stderr, "DEBUG: BOF - terminating parser\n");
break;
}
// we aren't using the start_cns, dive_mode, and algorithm, yet


@ -23,7 +23,6 @@
#include "errorhelper.h"
#include "sample.h"
#include "subsurface-string.h"
#include "format.h"
#include "trip.h"
#include "device.h"
#include "git-access.h"
@ -1896,11 +1895,11 @@ static int do_git_load(git_repository *repo, const char *branch, struct git_pars
return ret;
}
std::string get_sha(git_repository *repo, const std::string &branch)
std::string get_sha(git_repository *repo, const char *branch)
{
char git_id_buffer[GIT_OID_HEXSZ + 1];
git_commit *commit;
if (find_commit(repo, branch.c_str(), &commit))
if (find_commit(repo, branch, &commit))
return std::string();
git_oid_tostr(git_id_buffer, sizeof(git_id_buffer), (const git_oid *)commit);
return std::string(git_id_buffer);
@ -1914,7 +1913,7 @@ std::string get_sha(git_repository *repo, const std::string &branch)
* If it is a git repository, we return zero for success,
* or report an error and return 1 if the load failed.
*/
int git_load_dives(struct git_info *info, struct divelog *log)
extern "C" int git_load_dives(struct git_info *info, struct divelog *log)
{
int ret;
struct git_parser_state state;
@ -1922,8 +1921,8 @@ int git_load_dives(struct git_info *info, struct divelog *log)
state.log = log;
if (!info->repo)
return report_error("Unable to open git repository '%s[%s]'", info->url.c_str(), info->branch.c_str());
ret = do_git_load(info->repo, info->branch.c_str(), &state);
return report_error("Unable to open git repository '%s[%s]'", info->url, info->branch);
ret = do_git_load(info->repo, info->branch, &state);
finish_active_dive(&state);
finish_active_trip(&state);
return ret;


@ -169,6 +169,15 @@ void put_format(struct membuffer *b, const char *fmt, ...)
va_end(args);
}
void put_format_loc(struct membuffer *b, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
put_vformat_loc(b, fmt, args);
va_end(args);
}
void put_milli(struct membuffer *b, const char *pre, int value, const char *post)
{
int i;
@ -210,7 +219,7 @@ void put_depth(struct membuffer *b, depth_t depth, const char *pre, const char *
void put_duration(struct membuffer *b, duration_t duration, const char *pre, const char *post)
{
if (duration.seconds)
put_format(b, "%s%u:%02u%s", pre, FRACTION_TUPLE(duration.seconds, 60), post);
put_format(b, "%s%u:%02u%s", pre, FRACTION(duration.seconds, 60), post);
}
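put_duration() feeds a "%u:%02u" format from the FRACTION/FRACTION_TUPLE macro, which is assumed here to expand to the quotient/remainder pair (minutes and seconds). A sketch with an invented stand-in macro, not the Subsurface one:

#include <cstdio>

#define QUOT_REM(n, x) (unsigned)((n) / (x)), (unsigned)((n) % (x))

int main()
{
	unsigned seconds = 135;
	std::printf("%u:%02u\n", QUOT_REM(seconds, 60));	// prints "2:15"
}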
void put_pressure(struct membuffer *b, pressure_t pressure, const char *pre, const char *post)
@ -234,7 +243,7 @@ void put_degrees(struct membuffer *b, degrees_t value, const char *pre, const ch
udeg = -udeg;
sign = "-";
}
put_format(b, "%s%s%u.%06u%s", pre, sign, FRACTION_TUPLE(udeg, 1000000), post);
put_format(b, "%s%s%u.%06u%s", pre, sign, FRACTION(udeg, 1000000), post);
}
void put_location(struct membuffer *b, const location_t *loc, const char *pre, const char *post)
@ -300,10 +309,12 @@ void put_quoted(struct membuffer *b, const char *text, int is_attribute, int is_
char *add_to_string_va(char *old, const char *fmt, va_list args)
{
char *res;
struct membufferpp o, n;
struct membuffer o = { 0 }, n = { 0 };
put_vformat(&n, fmt, args);
put_format(&o, "%s\n%s", old ?: "", mb_cstring(&n));
res = strdup(mb_cstring(&o));
free_buffer(&o);
free_buffer(&n);
free((void *)old);
return res;
}
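add_to_string_va() above appends a newline plus a printf-formatted line to an existing heap string and returns the new allocation, freeing the old one. A self-contained sketch of the same idea without the membuffer machinery (invented helper, fixed-size line buffer):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static char *append_line(char *old, const char *fmt, ...)
{
	char line[256];
	va_list args;
	va_start(args, fmt);
	std::vsnprintf(line, sizeof(line), fmt, args);
	va_end(args);

	size_t len = (old ? std::strlen(old) : 0) + 1 + std::strlen(line) + 1;
	char *res = (char *)std::malloc(len);
	std::snprintf(res, len, "%s\n%s", old ? old : "", line);
	std::free(old);
	return res;
}

int main()
{
	char *s = append_line(nullptr, "dive #%d", 42);
	s = append_line(s, "duration %u:%02u", 45u, 30u);
	std::puts(s);
	std::free(s);
}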


@ -75,7 +75,9 @@ extern void strip_mb(struct membuffer *);
/* The pointer obtained by mb_cstring is invalidated by any modifictation to the membuffer! */
extern const char *mb_cstring(struct membuffer *);
extern __printf(2, 0) void put_vformat(struct membuffer *, const char *, va_list);
extern __printf(2, 0) void put_vformat_loc(struct membuffer *, const char *, va_list);
extern __printf(2, 3) void put_format(struct membuffer *, const char *fmt, ...);
extern __printf(2, 3) void put_format_loc(struct membuffer *, const char *fmt, ...);
extern __printf(2, 0) char *add_to_string_va(char *old, const char *fmt, va_list args);
extern __printf(2, 3) char *add_to_string(char *old, const char *fmt, ...);


@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include "core/parse-gpx.h"
#include "core/errorhelper.h"
#include "core/subsurface-time.h"
#include "core/namecmp.h"
#include <QFile>
@ -26,7 +25,7 @@ int getCoordsFromGPXFile(struct dive_coords *coords, const QString &fileName)
if (!gpxFile.open(QIODevice::ReadOnly | QIODevice::Text)) {
QByteArray local8bitBAString1 = fileName.toLocal8Bit();
char *fname = local8bitBAString1.data(); // convert QString to a C string fileName
report_info("GPS file open error: file name = %s", fname);
fprintf(stderr, "GPS file open error: file name = %s\n", fname);
return 1;
}
@ -77,7 +76,7 @@ int getCoordsFromGPXFile(struct dive_coords *coords, const QString &fileName)
#ifdef GPSDEBUG
utc_mkdate(trkpt_time, &time); // print coordinates and time of each trkpt element of the GPX file as well as dive start time
report_info(" %02d: lat=%f lon=%f timestamp=%ld (%ld) %02d/%02d/%02d %02d:%02d dt=%ld %02d/%02d/%02d %02d:%02d", line, lat,
fprintf(stderr, " %02d: lat=%f lon=%f timestamp=%ld (%ld) %02d/%02d/%02d %02d:%02d dt=%ld %02d/%02d/%02d %02d:%02d\n", line, lat,
lon, trkpt_time, time_offset, time.tm_year, time.tm_mon+1, time.tm_mday, time.tm_hour, time.tm_min, divetime, dyr, dmon+1, dday,dhr, dmin);
#endif


@ -26,7 +26,6 @@
#include "divesite.h"
#include "errorhelper.h"
#include "parse.h"
#include "format.h"
#include "subsurface-float.h"
#include "subsurface-string.h"
#include "subsurface-time.h"
@ -56,7 +55,7 @@ static void divedate(const char *buffer, timestamp_t *when, struct parser_state
} else if (sscanf(buffer, "%d-%d-%d %d:%d:%d", &y, &m, &d, &hh, &mm, &ss) >= 3) {
/* This is also ok */
} else {
report_info("Unable to parse date '%s'", buffer);
fprintf(stderr, "Unable to parse date '%s'\n", buffer);
return;
}
state->cur_tm.tm_year = y;
@ -166,7 +165,7 @@ static enum number_type parse_float(const char *buffer, double *res, const char
* as this is likely indication of a bug - but right now we don't have
* that information available */
if (first_time) {
report_info("Floating point value with decimal comma (%s)?", buffer);
fprintf(stderr, "Floating point value with decimal comma (%s)?\n", buffer);
first_time = false;
}
/* Try again in permissive mode*/
@ -218,7 +217,7 @@ static void pressure(const char *buffer, pressure_t *pressure, struct parser_sta
}
/* fallthrough */
default:
report_info("Strange pressure reading %s", buffer);
printf("Strange pressure reading %s\n", buffer);
}
}
@ -253,7 +252,7 @@ static void salinity(const char *buffer, int *salinity)
*salinity = lrint(val.fp * 10.0);
break;
default:
report_info("Strange salinity reading %s", buffer);
printf("Strange salinity reading %s\n", buffer);
}
}
@ -273,7 +272,7 @@ static void depth(const char *buffer, depth_t *depth, struct parser_state *state
}
break;
default:
report_info("Strange depth reading %s", buffer);
printf("Strange depth reading %s\n", buffer);
}
}
@ -306,7 +305,7 @@ static void weight(const char *buffer, weight_t *weight, struct parser_state *st
}
break;
default:
report_info("Strange weight reading %s", buffer);
printf("Strange weight reading %s\n", buffer);
}
}
@ -329,7 +328,7 @@ static void temperature(const char *buffer, temperature_t *temperature, struct p
}
break;
default:
report_info("Strange temperature reading %s", buffer);
printf("Strange temperature reading %s\n", buffer);
}
/* temperatures outside -40C .. +70C should be ignored */
if (temperature->mkelvin < ZERO_C_IN_MKELVIN - 40000 ||
@ -358,7 +357,7 @@ static void sampletime(const char *buffer, duration_t *time)
break;
default:
time->seconds = 0;
report_info("Strange sample time reading %s", buffer);
printf("Strange sample time reading %s\n", buffer);
}
}
@ -412,7 +411,7 @@ static void percent(const char *buffer, fraction_t *fraction)
break;
}
default:
report_info(translate("gettextFromC", "Strange percentage reading %s"), buffer);
printf(translate("gettextFromC", "Strange percentage reading %s\n"), buffer);
break;
}
}
@ -440,7 +439,7 @@ static void cylindersize(const char *buffer, volume_t *volume)
break;
default:
report_info("Strange volume reading %s", buffer);
printf("Strange volume reading %s\n", buffer);
break;
}
}
@ -620,7 +619,7 @@ static void fahrenheit(const char *buffer, temperature_t *temperature)
temperature->mkelvin = F_to_mkelvin(val.fp);
break;
default:
report_info("Crazy Diving Log temperature reading %s", buffer);
fprintf(stderr, "Crazy Diving Log temperature reading %s\n", buffer);
}
}
@ -656,7 +655,7 @@ static void psi_or_bar(const char *buffer, pressure_t *pressure)
pressure->mbar = lrint(val.fp * 1000);
break;
default:
report_info("Crazy Diving Log PSI reading %s", buffer);
fprintf(stderr, "Crazy Diving Log PSI reading %s\n", buffer);
}
}
@ -1069,7 +1068,7 @@ static void uddf_datetime(const char *buffer, timestamp_t *when, struct parser_s
if (i == 6)
goto success;
bad_date:
report_info("Bad date time %s", buffer);
printf("Bad date time %s\n", buffer);
return;
success:
@ -1169,7 +1168,7 @@ static void gps_lat(const char *buffer, struct dive *dive, struct parser_state *
add_dive_to_dive_site(dive, create_dive_site_with_gps(NULL, &location, state->log->sites));
} else {
if (ds->location.lat.udeg && ds->location.lat.udeg != location.lat.udeg)
report_info("Oops, changing the latitude of existing dive site id %8x name %s; not good", ds->uuid, ds->name ?: "(unknown)");
fprintf(stderr, "Oops, changing the latitude of existing dive site id %8x name %s; not good\n", ds->uuid, ds->name ?: "(unknown)");
ds->location.lat = location.lat;
}
}
@ -1185,7 +1184,7 @@ static void gps_long(const char *buffer, struct dive *dive, struct parser_state
add_dive_to_dive_site(dive, create_dive_site_with_gps(NULL, &location, state->log->sites));
} else {
if (ds->location.lon.udeg && ds->location.lon.udeg != location.lon.udeg)
report_info("Oops, changing the longitude of existing dive site id %8x name %s; not good", ds->uuid, ds->name ?: "(unknown)");
fprintf(stderr, "Oops, changing the longitude of existing dive site id %8x name %s; not good\n", ds->uuid, ds->name ?: "(unknown)");
ds->location.lon = location.lon;
}
}
@ -1226,7 +1225,7 @@ static void gps_in_dive(const char *buffer, struct dive *dive, struct parser_sta
if (dive_site_has_gps_location(ds) &&
has_location(&location) && !same_location(&ds->location, &location)) {
// Houston, we have a problem
report_info("dive site uuid in dive, but gps location (%10.6f/%10.6f) different from dive location (%10.6f/%10.6f)",
fprintf(stderr, "dive site uuid in dive, but gps location (%10.6f/%10.6f) different from dive location (%10.6f/%10.6f)\n",
ds->location.lat.udeg / 1000000.0, ds->location.lon.udeg / 1000000.0,
location.lat.udeg / 1000000.0, location.lon.udeg / 1000000.0);
std::string coords = printGPSCoordsC(&location);
@ -2219,11 +2218,11 @@ extern "C" int parse_dlf_buffer(unsigned char *buffer, size_t size, struct divel
break;
case 2:
/* Measure He */
//report_info("%ds he2 cells(0.01 mV): %d %d", time, (ptr[5] << 8) + ptr[4], (ptr[9] << 8) + ptr[8]);
//printf("%ds he2 cells(0.01 mV): %d %d\n", time, (ptr[5] << 8) + ptr[4], (ptr[9] << 8) + ptr[8]);
break;
case 3:
/* Measure Oxygen */
//report_info("%d s: o2 cells(0.01 mV): %d %d %d %d", time, (ptr[5] << 8) + ptr[4], (ptr[7] << 8) + ptr[6], (ptr[9] << 8) + ptr[8], (ptr[11] << 8) + ptr[10]);
//printf("%d s: o2 cells(0.01 mV): %d %d %d %d\n", time, (ptr[5] << 8) + ptr[4], (ptr[7] << 8) + ptr[6], (ptr[9] << 8) + ptr[8], (ptr[11] << 8) + ptr[10]);
// [Pa/mV] coeficient O2
// 100 Pa == 1 mbar
sample_start(&state);


@ -71,7 +71,6 @@ int get_picture_idx(const struct picture_table *t, const char *filename)
return -1;
}
#if !defined(SUBSURFACE_MOBILE)
/* Return distance of timestamp to time of dive. Result is always positive, 0 means during dive. */
static timestamp_t time_from_dive(const struct dive *d, timestamp_t timestamp)
{
@ -119,6 +118,7 @@ static bool dive_check_picture_time(const struct dive *d, timestamp_t timestamp)
return time_from_dive(d, timestamp) < D30MIN;
}
#if !defined(SUBSURFACE_MOBILE)
/* Creates a picture and indicates the dive to which this picture should be added.
* The caller is responsible for actually adding the picture to the dive.
* If no appropriate dive was found, no picture is created and NULL is returned.


@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* planner.cpp
/* planner.c
*
* code that allows us to plan future dives
*
@ -26,7 +26,7 @@
#include "qthelper.h"
#include "version.h"
static constexpr int base_timestep = 2; // seconds
#define TIMESTEP 2 /* second */
static int decostoplevels_metric[] = { 0, 3000, 6000, 9000, 12000, 15000, 18000, 21000, 24000, 27000,
30000, 33000, 36000, 39000, 42000, 45000, 48000, 51000, 54000, 57000,
@ -42,7 +42,7 @@ static int decostoplevels_imperial[] = { 0, 3048, 6096, 9144, 12192, 15240, 1828
325120, 345440, 365760, 386080 };
#if DEBUG_PLAN
extern "C" void dump_plan(struct diveplan *diveplan)
void dump_plan(struct diveplan *diveplan)
{
struct divedatapoint *dp;
struct tm tm;
@ -59,13 +59,13 @@ extern "C" void dump_plan(struct diveplan *diveplan)
diveplan->surface_pressure);
dp = diveplan->dp;
while (dp) {
printf("\t%3u:%02u: %6dmm cylid: %2d setpoint: %d\n", FRACTION_TUPLE(dp->time, 60), dp->depth, dp->cylinderid, dp->setpoint);
printf("\t%3u:%02u: %6dmm cylid: %2d setpoint: %d\n", FRACTION(dp->time, 60), dp->depth, dp->cylinderid, dp->setpoint);
dp = dp->next;
}
}
#endif
extern "C" bool diveplan_empty(struct diveplan *diveplan)
bool diveplan_empty(struct diveplan *diveplan)
{
struct divedatapoint *dp;
if (!diveplan || !diveplan->dp)
@ -80,7 +80,7 @@ extern "C" bool diveplan_empty(struct diveplan *diveplan)
}
/* get the cylinder index at a certain time during the dive */
extern "C" int get_cylinderid_at_time(struct dive *dive, struct divecomputer *dc, duration_t time)
int get_cylinderid_at_time(struct dive *dive, struct divecomputer *dc, duration_t time)
{
// we start with the first cylinder unless an event tells us otherwise
int cylinder_idx = 0;
@ -111,8 +111,9 @@ static void interpolate_transition(struct deco_state *ds, struct dive *dive, dur
}
/* returns the tissue tolerance at the end of this (partial) dive */
static int tissue_at_end(struct deco_state *ds, struct dive *dive, const struct divecomputer *dc, deco_state_cache &cache)
static int tissue_at_end(struct deco_state *ds, struct dive *dive, struct deco_state **cached_datap)
{
struct divecomputer *dc;
struct sample *sample, *psample;
int i;
depth_t lastdepth = {};
@ -122,12 +123,13 @@ static int tissue_at_end(struct deco_state *ds, struct dive *dive, const struct
if (!dive)
return 0;
if (cache) {
cache.restore(ds, true);
if (*cached_datap) {
restore_deco_state(*cached_datap, ds, true);
} else {
surface_interval = init_decompression(ds, dive, true);
cache.cache(ds);
cache_deco_state(ds, cached_datap);
}
dc = &dive->dc;
if (!dc->samples)
return 0;
psample = sample = dc->sample;
@ -206,9 +208,10 @@ static void update_cylinder_pressure(struct dive *d, int old_depth, int new_dept
/* overwrite the data in dive
* return false if something goes wrong */
static void create_dive_from_plan(struct diveplan *diveplan, struct dive *dive, struct divecomputer *dc, bool track_gas)
static void create_dive_from_plan(struct diveplan *diveplan, struct dive *dive, bool track_gas)
{
struct divedatapoint *dp;
struct divecomputer *dc;
struct sample *sample;
struct event *ev;
cylinder_t *cyl;
@ -216,7 +219,7 @@ static void create_dive_from_plan(struct diveplan *diveplan, struct dive *dive,
int lasttime = 0, last_manual_point = 0;
depth_t lastdepth = {.mm = 0};
int lastcylid;
enum divemode_t type = dc->divemode;
enum divemode_t type = dive->dc.divemode;
if (!diveplan || !diveplan->dp)
return;
@ -228,6 +231,7 @@ static void create_dive_from_plan(struct diveplan *diveplan, struct dive *dive,
// reset the cylinders and clear out the samples and events of the
// dive-to-be-planned so we can restart
reset_cylinders(dive, track_gas);
dc = &dive->dc;
dc->when = dive->when = diveplan->when;
dc->surface_pressure.mbar = diveplan->surface_pressure;
dc->salinity = diveplan->salinity;
@ -315,7 +319,7 @@ static void create_dive_from_plan(struct diveplan *diveplan, struct dive *dive,
finish_sample(dc);
dp = dp->next;
}
dc->last_manual_time.seconds = last_manual_point;
dive->dc.last_manual_time.seconds = last_manual_point;
#if DEBUG_PLAN & 32
save_dive(stdout, dive);
@ -323,7 +327,7 @@ static void create_dive_from_plan(struct diveplan *diveplan, struct dive *dive,
return;
}
extern "C" void free_dps(struct diveplan *diveplan)
void free_dps(struct diveplan *diveplan)
{
if (!diveplan)
return;
@ -340,7 +344,7 @@ static struct divedatapoint *create_dp(int time_incr, int depth, int cylinderid,
{
struct divedatapoint *dp;
dp = (divedatapoint *)malloc(sizeof(struct divedatapoint));
dp = malloc(sizeof(struct divedatapoint));
dp->time = time_incr;
dp->depth.mm = depth;
dp->cylinderid = cylinderid;
@ -367,7 +371,7 @@ static void add_to_end_of_diveplan(struct diveplan *diveplan, struct divedatapoi
dp->time += lasttime;
}
extern "C" struct divedatapoint *plan_add_segment(struct diveplan *diveplan, int duration, int depth, int cylinderid, int po2, bool entered, enum divemode_t divemode)
struct divedatapoint *plan_add_segment(struct diveplan *diveplan, int duration, int depth, int cylinderid, int po2, bool entered, enum divemode_t divemode)
{
struct divedatapoint *dp = create_dp(duration, depth, cylinderid, divemode == CCR ? po2 : 0);
dp->entered = entered;
@ -397,10 +401,10 @@ static int setpoint_change(struct dive *dive, int cylinderid)
}
}
static std::vector<gaschanges> analyze_gaslist(struct diveplan *diveplan, struct dive *dive, int depth, int *asc_cylinder, bool ccr)
static struct gaschanges *analyze_gaslist(struct diveplan *diveplan, struct dive *dive, int *gaschangenr, int depth, int *asc_cylinder, bool ccr)
{
size_t nr = 0;
std::vector<gaschanges> gaschanges;
int nr = 0;
struct gaschanges *gaschanges = NULL;
struct divedatapoint *dp = diveplan->dp;
struct divedatapoint *best_ascent_dp = NULL;
bool total_time_zero = true;
@ -409,11 +413,10 @@ static std::vector<gaschanges> analyze_gaslist(struct diveplan *diveplan, struct
if (dp->depth.mm <= depth) {
int i = 0;
nr++;
gaschanges.resize(nr);
while (i < static_cast<int>(nr) - 1) {
gaschanges = realloc(gaschanges, nr * sizeof(struct gaschanges));
while (i < nr - 1) {
if (dp->depth.mm < gaschanges[i].depth) {
for (int j = static_cast<int>(nr) - 2; j >= i; j--)
gaschanges[j + 1] = gaschanges[j];
memmove(gaschanges + i + 1, gaschanges + i, (nr - i - 1) * sizeof(struct gaschanges));
break;
}
i++;
@ -432,11 +435,12 @@ static std::vector<gaschanges> analyze_gaslist(struct diveplan *diveplan, struct
}
dp = dp->next;
}
*gaschangenr = nr;
if (best_ascent_dp) {
*asc_cylinder = best_ascent_dp->cylinderid;
}
#if DEBUG_PLAN & 16
for (size_t nr = 0; nr < gaschanges.size(); nr++) {
for (nr = 0; nr < *gaschangenr; nr++) {
int idx = gaschanges[nr].gasidx;
printf("gaschange nr %d: @ %5.2lfm gasidx %d (%s)\n", nr, gaschanges[nr].depth / 1000.0,
idx, gasname(&get_cylinder(&dive, idx)->gasmix));
@@ -446,23 +450,20 @@ static std::vector<gaschanges> analyze_gaslist(struct diveplan *diveplan, struct
}
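Both sides of the hunk above implement the same idea: each gas change found while walking the dive plan is inserted into a list that is kept ordered by depth, either with realloc()/memmove() or by resizing a std::vector and shifting entries. A minimal, self-contained C++ sketch of that pattern; the struct name "change", the helper insert_sorted() and the depths are invented for illustration and are not Subsurface APIs.

#include <algorithm>
#include <cstdio>
#include <vector>

struct change { int depth_mm; int gasidx; };   // stand-in for struct gaschanges

static void insert_sorted(std::vector<change> &v, change c)
{
	auto pos = std::lower_bound(v.begin(), v.end(), c,
		[](const change &a, const change &b) { return a.depth_mm < b.depth_mm; });
	v.insert(pos, c);   // keeps the vector ordered by depth, shallowest first
}

int main()
{
	std::vector<change> gaschanges;
	insert_sorted(gaschanges, { 21000, 1 });
	insert_sorted(gaschanges, { 6000, 2 });
	insert_sorted(gaschanges, { 12000, 3 });
	for (const change &c : gaschanges)
		printf("gas %d at %.1f m\n", c.gasidx, c.depth_mm / 1000.0);
}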
/* sort all the stops into one ordered list */
static std::vector<int> sort_stops(int dstops[], size_t dnr, std::vector<gaschanges> gstops)
static int *sort_stops(int *dstops, int dnr, struct gaschanges *gstops, int gnr)
{
int total = dnr + gstops.size();
std::vector<int> stoplevels(total);
/* Can't happen. */
if (dnr == 0)
return std::vector<int>();
int i, gi, di;
int total = dnr + gnr;
int *stoplevels = malloc(total * sizeof(int));
/* no gaschanges */
if (gstops.empty()) {
std::copy(dstops, dstops + dnr, stoplevels.begin());
if (gnr == 0) {
memcpy(stoplevels, dstops, dnr * sizeof(int));
return stoplevels;
}
int i = static_cast<int>(total) - 1;
int gi = static_cast<int>(gstops.size()) - 1;
int di = static_cast<int>(dnr) - 1;
i = total - 1;
gi = gnr - 1;
di = dnr - 1;
while (i >= 0) {
if (dstops[di] > gstops[gi].depth) {
stoplevels[i] = dstops[di];
@@ -492,7 +493,7 @@ static std::vector<int> sort_stops(int dstops[], size_t dnr, std::vector<gaschan
#if DEBUG_PLAN & 16
int k;
for (k = static_cast<int>(gstops.size()) + dnr - 1; k >= 0; k--) {
for (k = gnr + dnr - 1; k >= 0; k--) {
printf("stoplevel[%d]: %5.2lfm\n", k, stoplevels[k] / 1000.0);
if (stoplevels[k] == 0)
break;
@@ -501,8 +502,9 @@ static std::vector<int> sort_stops(int dstops[], size_t dnr, std::vector<gaschan
return stoplevels;
}
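sort_stops() above merges two depth-ordered lists, the candidate deco-stop depths and the gas-change depths, into one ordered list of stop levels. The same merge can be sketched with std::merge on plain integers; the depth values below are made-up examples in millimetres, not data taken from the planner.

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
	std::vector<int> deco_stops = { 3000, 6000, 9000, 12000 };   // mm, ascending
	std::vector<int> gas_changes = { 6000, 21000 };              // depths of gas switches
	std::vector<int> stoplevels(deco_stops.size() + gas_changes.size());
	std::merge(deco_stops.begin(), deco_stops.end(),
		   gas_changes.begin(), gas_changes.end(), stoplevels.begin());
	for (int d : stoplevels)
		printf("stop level %.1f m\n", d / 1000.0);
}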
extern "C" int ascent_velocity(int depth, int avg_depth, int)
int ascent_velocity(int depth, int avg_depth, int bottom_time)
{
UNUSED(bottom_time);
/* We need to make this configurable */
/* As an example (and possibly reasonable default) this is the Tech 1 procedure according
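The hunk cuts off before the actual rate table of ascent_velocity(), so only the intent is visible here: the ascent rate depends on where the diver is relative to the average depth of the dive. A rough, self-contained sketch of such a depth-banded rate follows; the 75% threshold and the 9/6/3 m/min figures are assumptions chosen for the example, not the values Subsurface uses.

#include <cstdio>

static int sketch_ascent_velocity(int depth_mm, int avg_depth_mm)
{
	if (depth_mm * 4 > avg_depth_mm * 3)    // deeper than 75% of the average depth
		return 9000 / 60;               // 9 m/min, expressed in mm/s
	if (depth_mm > 6000)
		return 6000 / 60;               // 6 m/min in mid-water
	return 3000 / 60;                       // 3 m/min for the last 6 m
}

int main()
{
	printf("rate at 30 m: %d mm/s\n", sketch_ascent_velocity(30000, 25000));
	printf("rate at  5 m: %d mm/s\n", sketch_ascent_velocity(5000, 25000));
}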
@@ -526,10 +528,10 @@ static void track_ascent_gas(int depth, struct dive *dive, int cylinder_id, int
{
cylinder_t *cylinder = get_cylinder(dive, cylinder_id);
while (depth > 0) {
int deltad = ascent_velocity(depth, avg_depth, bottom_time) * base_timestep;
int deltad = ascent_velocity(depth, avg_depth, bottom_time) * TIMESTEP;
if (deltad > depth)
deltad = depth;
update_cylinder_pressure(dive, depth, depth - deltad, base_timestep, prefs.decosac, cylinder, true, divemode);
update_cylinder_pressure(dive, depth, depth - deltad, TIMESTEP, prefs.decosac, cylinder, true, divemode);
if (depth <= 5000 && depth >= (5000 - deltad) && safety_stop) {
update_cylinder_pressure(dive, 5000, 5000, 180, prefs.decosac, cylinder, true, divemode);
safety_stop = false;
@@ -543,12 +545,12 @@ static bool trial_ascent(struct deco_state *ds, int wait_time, int trial_depth,
{
bool clear_to_ascend = true;
deco_state_cache trial_cache;
struct deco_state *trial_cache = NULL;
// For consistency with other VPM-B implementations, we should not start the ascent while the ceiling is
// deeper than the next stop (thus the offgassing during the ascent is ignored).
// However, we still need to make sure we don't break the ceiling due to on-gassing during ascent.
trial_cache.cache(ds);
cache_deco_state(ds, &trial_cache);
if (wait_time)
add_segment(ds, depth_to_bar(trial_depth, dive),
gasmix,
@@ -557,19 +559,20 @@ static bool trial_ascent(struct deco_state *ds, int wait_time, int trial_depth,
double tolerance_limit = tissue_tolerance_calc(ds, dive, depth_to_bar(stoplevel, dive), true);
update_regression(ds, dive);
if (deco_allowed_depth(tolerance_limit, surface_pressure, dive, 1) > stoplevel) {
trial_cache.restore(ds, false);
restore_deco_state(trial_cache, ds, false);
free(trial_cache);
return false;
}
}
while (trial_depth > stoplevel) {
double tolerance_limit;
int deltad = ascent_velocity(trial_depth, avg_depth, bottom_time) * base_timestep;
int deltad = ascent_velocity(trial_depth, avg_depth, bottom_time) * TIMESTEP;
if (deltad > trial_depth) /* don't test against depth above surface */
deltad = trial_depth;
add_segment(ds, depth_to_bar(trial_depth, dive),
gasmix,
base_timestep, po2, divemode, prefs.decosac, true);
TIMESTEP, po2, divemode, prefs.decosac, true);
tolerance_limit = tissue_tolerance_calc(ds, dive, depth_to_bar(trial_depth, dive), true);
if (decoMode(true) == VPMB)
update_regression(ds, dive);
@@ -580,7 +583,8 @@ static bool trial_ascent(struct deco_state *ds, int wait_time, int trial_depth,
}
trial_depth -= deltad;
}
trial_cache.restore(ds, false);
restore_deco_state(trial_cache, ds, false);
free(trial_cache);
return clear_to_ascend;
}
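trial_ascent() relies on a save-simulate-restore pattern: snapshot the deco state, simulate the candidate ascent, and roll the state back whether or not the ceiling was violated. The diff above only changes how the snapshot is stored (deco_state_cache versus cache_deco_state()/restore_deco_state() plus free()). A toy sketch of the pattern itself, with State, Snapshot and the loading numbers invented purely for illustration rather than taken from Subsurface:

#include <cstdio>

struct State { double loading = 0.0; };    // toy stand-in for struct deco_state

struct Snapshot {                          // toy stand-in for deco_state_cache
	State saved;
	explicit Snapshot(const State &s) : saved(s) {}
	void restore(State &s) const { s = saved; }
};

static bool trial_ascent_ok(State &s, int steps)
{
	Snapshot snap(s);                  // like trial_cache.cache(ds) / cache_deco_state()
	bool ok = true;
	for (int i = 0; i < steps; i++) {
		s.loading += 0.1;          // stand-in for add_segment() during the trial
		if (s.loading > 1.0) { ok = false; break; }
	}
	snap.restore(s);                   // like trial_cache.restore(ds, false)
	return ok;
}

int main()
{
	State ds;
	printf("short trial: %s\n", trial_ascent_ok(ds, 5) ? "clear" : "blocked");
	printf("long trial:  %s\n", trial_ascent_ok(ds, 50) ? "clear" : "blocked");
	printf("state after trials: %.1f (unchanged)\n", ds.loading);
}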
@@ -651,7 +655,7 @@ static void average_max_depth(struct diveplan *dive, int *avg_depth, int *max_de
*avg_depth = *max_depth = 0;
}
bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, int dcNr, int timestep, struct decostop *decostoptable, deco_state_cache &cache, bool is_planner, bool show_disclaimer)
bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, int timestep, struct decostop *decostoptable, struct deco_state **cached_datap, bool is_planner, bool show_disclaimer)
{
int bottom_depth;
@@ -660,16 +664,18 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
bool is_final_plan = true;
int bottom_time;
int previous_deco_time;
deco_state_cache bottom_cache;
struct deco_state *bottom_cache = NULL;
struct sample *sample;
int po2;
int transitiontime, gi;
int current_cylinder, stop_cylinder;
size_t stopidx;
int stopidx;
int depth;
struct gaschanges *gaschanges = NULL;
int gaschangenr;
int *decostoplevels;
size_t decostoplevelcount;
std::vector<int> stoplevels;
int decostoplevelcount;
int *stoplevels = NULL;
bool stopping = false;
bool pendinggaschange = false;
int clock, previous_point_time;
@@ -680,22 +686,21 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
bool o2break_next = false;
int break_cylinder = -1, breakfrom_cylinder = 0;
bool last_segment_min_switch = false;
bool error = false;
int error = 0;
bool decodive = false;
int first_stop_depth = 0;
int laststoptime = timestep;
bool o2breaking = false;
int decostopcounter = 0;
struct divecomputer *dc = get_dive_dc(dive, dcNr);
enum divemode_t divemode = dc->divemode;
enum divemode_t divemode = dive->dc.divemode;
set_gf(diveplan->gflow, diveplan->gfhigh);
set_vpmb_conservatism(diveplan->vpmb_conservatism);
if (!diveplan->surface_pressure) {
// Let's use the dive's surface pressure in the planner, if we have one...
if (dc->surface_pressure.mbar) { // First from DC...
diveplan->surface_pressure = dc->surface_pressure.mbar;
if (dive->dc.surface_pressure.mbar) { // First from DC...
diveplan->surface_pressure = dive->dc.surface_pressure.mbar;
}
else if (dive->surface_pressure.mbar) { // After from user...
diveplan->surface_pressure = dive->surface_pressure.mbar;
@@ -707,20 +712,19 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
clear_deco(ds, dive->surface_pressure.mbar / 1000.0, true);
ds->max_bottom_ceiling_pressure.mbar = ds->first_ceiling_pressure.mbar = 0;
create_dive_from_plan(diveplan, dive, dc, is_planner);
create_dive_from_plan(diveplan, dive, is_planner);
// Do we want deco stop array in metres or feet?
if (prefs.units.length == units::METERS ) {
if (prefs.units.length == METERS ) {
decostoplevels = decostoplevels_metric;
decostoplevelcount = std::size(decostoplevels_metric);
decostoplevelcount = sizeof(decostoplevels_metric) / sizeof(int);
} else {
decostoplevels = decostoplevels_imperial;
decostoplevelcount = std::size(decostoplevels_imperial);
decostoplevelcount = sizeof(decostoplevels_imperial) / sizeof(int);
}
/* If the user has selected last stop to be at 6m/20', we need to get rid of the 3m/10' stop.
* Otherwise reinstate the last stop 3m/10' stop.
* Remark: not reentrant, but the user probably won't change preferences while this is running.
*/
if (prefs.last_stop)
*(decostoplevels + 1) = 0;
@@ -728,20 +732,20 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
*(decostoplevels + 1) = M_OR_FT(3,10);
/* Let's start at the last 'sample', i.e. the last manually entered waypoint. */
sample = &dc->sample[dc->samples - 1];
sample = &dive->dc.sample[dive->dc.samples - 1];
/* Keep time during the ascend */
bottom_time = clock = previous_point_time = dc->sample[dc->samples - 1].time.seconds;
bottom_time = clock = previous_point_time = dive->dc.sample[dive->dc.samples - 1].time.seconds;
current_cylinder = get_cylinderid_at_time(dive, dc, sample->time);
current_cylinder = get_cylinderid_at_time(dive, &dive->dc, sample->time);
// Find the divemode at the end of the dive
const struct event *ev = NULL;
divemode = UNDEF_COMP_TYPE;
divemode = get_current_divemode(dc, bottom_time, &ev, &divemode);
divemode = get_current_divemode(&dive->dc, bottom_time, &ev, &divemode);
gas = get_cylinder(dive, current_cylinder)->gasmix;
po2 = sample->setpoint.mbar;
depth = dc->sample[dc->samples - 1].depth.mm;
depth = dive->dc.sample[dive->dc.samples - 1].depth.mm;
average_max_depth(diveplan, &avg_depth, &max_depth);
last_ascend_rate = ascent_velocity(depth, avg_depth, bottom_time);
@@ -752,7 +756,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
*/
transitiontime = lrint(depth / (double)prefs.ascratelast6m);
plan_add_segment(diveplan, transitiontime, 0, current_cylinder, po2, false, divemode);
create_dive_from_plan(diveplan, dive, dc, is_planner);
create_dive_from_plan(diveplan, dive, is_planner);
return false;
}
@@ -764,7 +768,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
/* Find the gases available for deco */
std::vector<gaschanges> gaschanges = analyze_gaslist(diveplan, dive, depth, &best_first_ascend_cylinder, divemode == CCR && !prefs.dobailout);
gaschanges = analyze_gaslist(diveplan, dive, &gaschangenr, depth, &best_first_ascend_cylinder, divemode == CCR && !prefs.dobailout);
/* Find the first potential decostopdepth above current depth */
for (stopidx = 0; stopidx < decostoplevelcount; stopidx++)
@@ -773,13 +777,13 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
if (stopidx > 0)
stopidx--;
/* Stoplevels are either depths of gas changes or potential deco stop depths. */
stoplevels = sort_stops(decostoplevels, stopidx + 1, gaschanges);
stopidx += gaschanges.size();
stoplevels = sort_stops(decostoplevels, stopidx + 1, gaschanges, gaschangenr);
stopidx += gaschangenr;
gi = static_cast<int>(gaschanges.size()) - 1;
gi = gaschangenr - 1;
/* Set tissue tolerance and initial vpmb gradient at start of ascent phase */
diveplan->surface_interval = tissue_at_end(ds, dive, dc, cache);
diveplan->surface_interval = tissue_at_end(ds, dive, cached_datap);
nuclear_regeneration(ds, clock);
vpmb_start_gradient(ds);
if (decoMode(true) == RECREATIONAL) {
@@ -796,7 +800,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
po2, diveplan->surface_pressure / 1000.0, dive, divemode) &&
enough_gas(dive, current_cylinder) && clock < 6 * 3600);
// We did stay one timestep too many.
// We did stay one DECOTIMESTEP too many.
// In the best of all worlds, we would roll back also the last add_segment in terms of caching deco state, but
// let's ignore that since for the eventual ascent in recreational mode, nobody looks at the ceiling anymore,
// so we don't really have to compute the deco state.
@@ -806,7 +810,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
previous_point_time = clock;
do {
/* Ascend to surface */
int deltad = ascent_velocity(depth, avg_depth, bottom_time) * base_timestep;
int deltad = ascent_velocity(depth, avg_depth, bottom_time) * TIMESTEP;
if (ascent_velocity(depth, avg_depth, bottom_time) != last_ascend_rate) {
plan_add_segment(diveplan, clock - previous_point_time, depth, current_cylinder, po2, false, divemode);
previous_point_time = clock;
@ -815,7 +819,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
if (depth - deltad < 0)
deltad = depth;
clock += base_timestep;
clock += TIMESTEP;
depth -= deltad;
if (depth <= 5000 && depth >= (5000 - deltad) && safety_stop) {
plan_add_segment(diveplan, clock - previous_point_time, 5000, current_cylinder, po2, false, divemode);
@@ -827,10 +831,12 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
}
} while (depth > 0);
plan_add_segment(diveplan, clock - previous_point_time, 0, current_cylinder, po2, false, divemode);
create_dive_from_plan(diveplan, dive, dc, is_planner);
create_dive_from_plan(diveplan, dive, is_planner);
add_plan_to_notes(diveplan, dive, show_disclaimer, error);
fixup_dc_duration(dc);
fixup_dc_duration(&dive->dc);
free(stoplevels);
free(gaschanges);
return false;
}
@@ -845,20 +851,21 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
}
// VPM-B or Buehlmann Deco
tissue_at_end(ds, dive, dc, cache);
tissue_at_end(ds, dive, cached_datap);
if ((divemode == CCR || divemode == PSCR) && prefs.dobailout) {
divemode = OC;
po2 = 0;
int bailoutsegment = std::max(prefs.min_switch_duration, 60 * prefs.problemsolvingtime);
int bailoutsegment = MAX(prefs.min_switch_duration, 60 * prefs.problemsolvingtime);
add_segment(ds, depth_to_bar(depth, dive),
get_cylinder(dive, current_cylinder)->gasmix,
bailoutsegment, po2, divemode, prefs.bottomsac, true);
plan_add_segment(diveplan, bailoutsegment, depth, current_cylinder, po2, false, divemode);
bottom_time += bailoutsegment;
last_segment_min_switch = true;
}
previous_deco_time = 100000000;
ds->deco_time = 10000000;
bottom_cache.cache(ds); // Lets us make several iterations
cache_deco_state(ds, &bottom_cache); // Lets us make several iterations
bottom_depth = depth;
bottom_gi = gi;
bottom_gas = gas;
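The sentinel values 100000000/10000000 and the cached bottom state above set up the planner's fixed-point iteration: replay the ascent from the cached state until the resulting deco time stops changing between passes. A self-contained toy of that loop follows; simulate_ascent(), the starting guess and the 10-second tolerance are assumptions for the example and not the planner's actual convergence test.

#include <cstdio>
#include <cstdlib>

// stand-in for re-running the whole ascent from the cached bottom state
static int simulate_ascent(int previous_deco_time)
{
	return previous_deco_time > 1200 ? previous_deco_time - 300 : 1200;
}

int main()
{
	int previous_deco_time = 100000000;    // "effectively infinite", as in the diff above
	int deco_time = 1800;                  // first guess at the schedule length, in seconds
	while (std::abs(previous_deco_time - deco_time) > 10) {
		previous_deco_time = deco_time;
		// restore_deco_state(bottom_cache, ds, true) / bottom_cache.restore() goes here
		deco_time = simulate_ascent(previous_deco_time);
	}
	printf("converged deco time: %d s\n", deco_time);
}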
@@ -872,7 +879,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
vpmb_next_gradient(ds, ds->deco_time, diveplan->surface_pressure / 1000.0, true);
previous_deco_time = ds->deco_time;
bottom_cache.restore(ds, true);
restore_deco_state(bottom_cache, ds, true);
depth = bottom_depth;
gi = bottom_gi;
@@ -905,7 +912,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
/* We will break out when we hit the surface */
do {
/* Ascend to next stop depth */
int deltad = ascent_velocity(depth, avg_depth, bottom_time) * base_timestep;
int deltad = ascent_velocity(depth, avg_depth, bottom_time) * TIMESTEP;
if (ascent_velocity(depth, avg_depth, bottom_time) != last_ascend_rate) {
if (is_final_plan)
plan_add_segment(diveplan, clock - previous_point_time, depth, current_cylinder, po2, false, divemode);
@@ -918,11 +925,11 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
add_segment(ds, depth_to_bar(depth, dive),
get_cylinder(dive, current_cylinder)->gasmix,
base_timestep, po2, divemode, prefs.decosac, true);
TIMESTEP, po2, divemode, prefs.decosac, true);
last_segment_min_switch = false;
clock += base_timestep;
clock += TIMESTEP;
depth -= deltad;
/* Print VPM-Gradient as gradient factor, this has to be done from within deco.cpp */
/* Print VPM-Gradient as gradient factor, this has to be done from within deco.c */
if (decodive)
ds->plot_depth = depth;
} while (depth > 0 && depth > stoplevels[stopidx]);
@@ -948,10 +955,10 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
stopping = true;
previous_point_time = clock;
current_cylinder = gaschanges[gi].gasidx;
gas = get_cylinder(dive, current_cylinder)->gasmix;
if (divemode == CCR)
po2 = setpoint_change(dive, current_cylinder);
#if DEBUG_PLAN & 16
gas = get_cylinder(dive, current_cylinder)->gasmix;
printf("switch to gas %d (%d/%d) @ %5.2lfm\n", gaschanges[gi].gasidx,
(get_o2(&gas) + 5) / 10, (get_he(&gas) + 5) / 10, gaschanges[gi].depth / 1000.0);
#endif
@@ -1004,10 +1011,10 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
*/
if (pendinggaschange) {
current_cylinder = gaschanges[gi + 1].gasidx;
gas = get_cylinder(dive, current_cylinder)->gasmix;
if (divemode == CCR)
po2 = setpoint_change(dive, current_cylinder);
#if DEBUG_PLAN & 16
gas = get_cylinder(dive, current_cylinder)->gasmix;
printf("switch to gas %d (%d/%d) @ %5.2lfm\n", gaschanges[gi + 1].gasidx,
(get_o2(&gas) + 5) / 10, (get_he(&gas) + 5) / 10, gaschanges[gi + 1].depth / 1000.0);
#endif
@@ -1017,6 +1024,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
get_cylinder(dive, current_cylinder)->gasmix,
prefs.min_switch_duration, po2, divemode, prefs.decosac, true);
clock += prefs.min_switch_duration;
last_segment_min_switch = true;
}
pendinggaschange = false;
}
@@ -1026,7 +1034,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
laststoptime = new_clock - clock;
/* Finish infinite deco */
if (laststoptime >= 48 * 3600 && depth >= 6000) {
error = true;
error = LONGDECO;
break;
}
@@ -1053,6 +1061,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
plan_add_segment(diveplan, laststoptime, depth, current_cylinder, po2, false, divemode);
previous_point_time = clock + laststoptime;
current_cylinder = break_cylinder;
gas = get_cylinder(dive, current_cylinder)->gasmix;
}
} else if (o2break_next) {
if (laststoptime >= 6 * 60) {
@ -1064,6 +1073,7 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
plan_add_segment(diveplan, laststoptime, depth, current_cylinder, po2, false, divemode);
previous_point_time = clock + laststoptime;
current_cylinder = breakfrom_cylinder;
gas = get_cylinder(dive, current_cylinder)->gasmix;
}
}
}
@@ -1109,10 +1119,13 @@ bool plan(struct deco_state *ds, struct diveplan *diveplan, struct dive *dive, i
current_cylinder = dive->cylinders.nr;
plan_add_segment(diveplan, prefs.surface_segment, 0, current_cylinder, 0, false, OC);
}
create_dive_from_plan(diveplan, dive, dc, is_planner);
create_dive_from_plan(diveplan, dive, is_planner);
add_plan_to_notes(diveplan, dive, show_disclaimer, error);
fixup_dc_duration(dc);
fixup_dc_duration(&dive->dc);
free(stoplevels);
free(gaschanges);
free(bottom_cache);
return decodive;
}
@@ -1166,7 +1179,7 @@ static int get_permille(const char *begin, const char **end)
return value;
}
extern "C" int validate_gas(const char *text, struct gasmix *gas)
int validate_gas(const char *text, struct gasmix *gas)
{
int o2, he;
@@ -1213,7 +1226,7 @@ extern "C" int validate_gas(const char *text, struct gasmix *gas)
return 1;
}
extern "C" int validate_po2(const char *text, int *mbar_po2)
int validate_po2(const char *text, int *mbar_po2)
{
int po2;
