diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index 1ced6d41b..8c07d5c79 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -34,29 +34,39 @@ jobs:
     needs: skip_duplicate
     if: ${{ needs.skip_duplicate.outputs.should_skip == 'false' }}
     strategy:
+      fail-fast: false
       matrix:
-        python-version: [3.8, 3.9]
-    runs-on: ubuntu-latest
+        python-version: ["3.9","3.10","3.11"]
+        os: ["ubuntu-latest", "macos-latest", "macos-12"]
+    runs-on: ${{ matrix.os }}
     steps:
      - name: Checkout
-       uses: actions/checkout@v2
+       uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
-       uses: actions/setup-python@v2
+       uses: actions/setup-python@v5
       with:
         python-version: ${{ matrix.python-version }}
      - name: Install dependencies
       run: |
-        python -m pip install --upgrade pip wheel
-        pip install --upgrade astromodels
+        python -m pip install --upgrade pip wheel setuptools numpy
+
+        # Temporary pin: speclite does not yet support the latest matplotlib
+        pip install "matplotlib<3.9"
+
+        if [[ ${{matrix.os}} == macos-latest ]];
+        then
+            brew update
+            brew install hdf5
+        fi
+
        if [[ "${ISDEV}" == "true" ]]; then
-           pip install --upgrade --pre --no-deps astromodels
+           pip install --upgrade --pre astromodels
+       else
+           pip install --upgrade astromodels
        fi
        pip install --upgrade flake8 coverage pytest-cov cython
-       # temp fix for speclite
-       pip install git+https://github.com/desihub/speclite.git
-
        pip install -e .
      env:
        ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}}
@@ -82,20 +92,31 @@ jobs:
     needs: skip_duplicate
     if: ${{ needs.skip_duplicate.outputs.should_skip == 'false' }}
     strategy:
+      fail-fast: false
       matrix:
-        python-version: [ 3.8, 3.9]
-    runs-on: ubuntu-latest
+        python-version: ["3.9","3.10","3.11"]
+        os: [ "ubuntu-latest", "macos-latest", "macos-12"]
+    runs-on: ${{ matrix.os }}
     steps:
      - name: Checkout
-       uses: actions/checkout@v2
+       uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
-       uses: actions/setup-python@v2
+       uses: actions/setup-python@v5
       with:
         python-version: ${{ matrix.python-version }}
      - name: Install dependencies
       run: |
        python -m pip install --upgrade pip wheel
-       python -m pip install cython extension_helpers astropy numpy numba tempita h5py
+
+       # Temporary pin: speclite does not yet support the latest matplotlib
+       pip install "matplotlib<3.9"
+
+       if [[ ${{matrix.os}} == macos-latest ]];
+       then
+           brew update
+           brew install hdf5
+       fi
+
        git clone https://github.com/threeML/astromodels
        cd astromodels
        git checkout dev
@@ -106,9 +127,6 @@ jobs:
        pip install --upgrade flake8 coverage pytest-cov cython
-       # temp fix for speclite
-       pip install git+https://github.com/desihub/speclite.git
-
       pip install -e .
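+      # The job matrix covers Python 3.9-3.11 on ubuntu-latest, macos-latest and
+      # macos-12; "macos-latest" presumably resolves to an Apple-silicon runner,
+      # hence the Homebrew HDF5 install guarded on that image only, while
+      # "macos-12" still provides an Intel macOS environment.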
- name: Lint with flake8 run: | @@ -136,38 +154,34 @@ jobs: strategy: fail-fast: false matrix: - os: ["ubuntu-latest", "macos-latest"] + os: ["ubuntu-latest", "macos-12", "macos-latest"] python-version: [3.9] + include: + - environment: ci/environment.yml + channel: threeml + - environment: ci/environment_noxspec.yml + channel: threeml/label/dev + os: macos-latest runs-on: ${{ matrix.os }} steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: XCode uses: maxim-lobanov/setup-xcode@v1 with: xcode-version: latest if: runner.os == 'macOS' - - name: Cache conda - uses: actions/cache@v2 - env: - # Increase this value to reset cache if etc/example-environment.yml has not changed - CACHE_NUMBER: 0 - with: - path: ~/conda_pkgs_dir - key: conda-${{ matrix.os }}-python-${{ matrix.python-version }}-${{ hashFiles('ci/environment.yml') }} - - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true auto-activate-base: false - miniforge-variant: Mambaforge + miniforge-version: latest activate-environment: test_env python-version: ${{ matrix.python-version }} - channels: conda-forge, xspecmodels, threeml, fermi, defaults - environment-file: ci/environment.yml + channels: conda-forge, xspecmodels, ${{ matrix.channel }}, fermi + environment-file: ${{ matrix.environment }} use-only-tar-bz2: true - - name: Init Env shell: bash -l {0} run: | @@ -184,7 +198,8 @@ jobs: mamba install -c conda-forge -c threeml/label/dev "threeml/label/dev::astromodels" fi - pip install fermipy + pip install --upgrade speclite + pip install fermipy "matplotlib<3.9" env: ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Conda list @@ -206,8 +221,8 @@ jobs: - name: test it shell: bash -l {0} run: | - wget https://raw.githubusercontent.com/fermi-lat/pyBurstAnalysisGUI/master/python/GtBurst/updater.py -O $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py - python $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py + #wget https://raw.githubusercontent.com/fermi-lat/pyBurstAnalysisGUI/master/python/GtBurst/updater.py -O $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py + #python $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py python -m pytest -vv --cov=threeML --cov-report=xml env: @@ -228,9 +243,12 @@ jobs: name: Publish to PyPi if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') runs-on: ubuntu-latest + permissions: + # IMPORTANT: this permission is mandatory for trusted publishing + id-token: write steps: - name: Checkout source - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v1 with: @@ -240,10 +258,8 @@ jobs: pip install wheel setuptools -U python setup.py sdist bdist_wheel - name: Publish - uses: pypa/gh-action-pypi-publish@v1.1.0 + uses: pypa/gh-action-pypi-publish@release/v1 with: - user: __token__ - password: ${{ secrets.PYPI_TOKEN }} skip-existing: true test-publish-pypi: @@ -251,8 +267,8 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: python-version: 3.9 diff --git a/.github/workflows/conda_build.yml b/.github/workflows/conda_build.yml index 004855c8c..b1310a921 100644 --- 
a/.github/workflows/conda_build.yml +++ b/.github/workflows/conda_build.yml @@ -13,12 +13,17 @@ jobs: strategy: fail-fast: false matrix: - os: ["ubuntu-latest", "macos-latest"] + os: [ubuntu-latest, macos-latest, macos-12] python-version: [3.9] - + include: + - environment: ci/environment.yml + channel: threeml + - environment: ci/environment_noxspec.yml + channel: threeml/label/dev + os: macos-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: XCode @@ -27,28 +32,20 @@ jobs: xcode-version: latest if: runner.os == 'macOS' - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Cache conda - uses: actions/cache@v2 - with: - path: ~/conda_pkgs_dir - key: conda-${{ matrix.os }}-python-${{ matrix.python-version }}-${{ hashFiles('ci/environment.yml') }} - name: Add conda ${{ matrix.python-version }} to system path - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: activate-environment: "test_env" auto-activate-base: false miniforge-variant: Mambaforge - architecture: "x64" - #conda-build-version: "*" python-version: ${{ matrix.python-version }} auto-update-conda: true - environment-file: ci/environment.yml - channels: conda-forge, xspecmodels, threeml, fermi, defaults + environment-file: ${{ matrix.environment }} + channels: conda-forge, xspecmodels, ${{ matrix.channel }}, fermi use-only-tar-bz2: true - - name: Init Env shell: bash -l {0} run: | @@ -57,6 +54,7 @@ jobs: echo "PKG_VERSION=$PKG_VERSION" >> $GITHUB_ENV echo "HOME= ${HOME}" + echo "Conda installation path ${CONDA}" echo "Building ${PKG_VERSION} ..." echo "Python version: ${{matrix.python-version}}" echo "Running on development branch: ${ISDEV} " @@ -66,7 +64,8 @@ jobs: mamba install -c conda-forge -c threeml/label/dev "threeml/label/dev::astromodels" fi - pip install fermipy + pip install --upgrade speclite + pip install fermipy "matplotlib<3.9" env: ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Conda list @@ -83,13 +82,14 @@ jobs: conda mambabuild --python=${{matrix.python-version}} conda-dist/recipes/threeml - conda install --use-local -c conda-forge threeml + #conda install --use-local -c conda-forge threeml + conda install -c ${CONDA}/envs/test_env/conda-bld/ threeml - name: Test conda build shell: bash -l {0} run: | - wget https://raw.githubusercontent.com/fermi-lat/pyBurstAnalysisGUI/master/python/GtBurst/updater.py -O $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py - python $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py + #wget https://raw.githubusercontent.com/fermi-lat/pyBurstAnalysisGUI/master/python/GtBurst/updater.py -O $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py + #python $CONDA_PREFIX/lib/python${{ matrix.python-version }}/site-packages/fermitools/GtBurst/updater.py python -m pytest -vv --cov=threeML --cov-report=xml env: @@ -105,35 +105,29 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest, macos-latest, macos-12] python-version: [3.9] - + include: + - environment: ci/environment.yml + channel: threeml + - environment: ci/environment_noxspec.yml + channel: threeml/label/dev + os: macos-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: 
fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Cache conda - uses: actions/cache@v2 - with: - path: ~/conda_pkgs_dir - key: conda-${{ matrix.os }}-python-${{ matrix.python-version }}-${{ hashFiles('ci/environment.yml') }} - name: Add conda ${{ matrix.python-version }} to system path - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: activate-environment: "test_env" auto-activate-base: false miniforge-variant: Mambaforge - architecture: "x64" - #conda-build-version: "*" python-version: ${{ matrix.python-version }} auto-update-conda: true - environment-file: ci/environment.yml - channels: conda-forge, xspecmodels, threeml, fermi, defaults + environment-file: ${{ matrix.environment }} + channels: conda-forge, xspecmodels, ${{ matrix.channel }}, fermi use-only-tar-bz2: true - name: Init Env @@ -144,6 +138,7 @@ jobs: echo "PKG_VERSION=$PKG_VERSION" >> $GITHUB_ENV echo "HOME= ${HOME}" + echo "Conda installation path ${CONDA}" echo "Building ${PKG_VERSION} ..." echo "Python version: ${{matrix.python-version}}" @@ -174,11 +169,15 @@ jobs: if [[ "${{matrix.os}}" == "ubuntu-latest" ]]; then - anaconda -v --show-traceback -t $UPLOAD_TOKEN upload -u threeml /usr/share/miniconda/conda-bld/linux-64/*.tar.bz2 --force $LABEL + anaconda -v --show-traceback -t $UPLOAD_TOKEN upload -u threeml ${CONDA}/envs/test_env/conda-bld/linux-64/*.tar.bz2 --force $LABEL + + elif [[ ${{matrix.os}} == macos-latest ]]; then + + anaconda -v --show-traceback -t $UPLOAD_TOKEN upload -u threeml ${CONDA}/envs/test_env/conda-bld/osx-arm64/*.tar.bz2 --force $LABEL else - anaconda -v --show-traceback -t $UPLOAD_TOKEN upload -u threeml /usr/local/miniconda/conda-bld/osx-64/*.tar.bz2 --force $LABEL + anaconda -v --show-traceback -t $UPLOAD_TOKEN upload -u threeml ${CONDA}/envs/test_env/conda-bld/osx-64/*.tar.bz2 --force $LABEL fi diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ef1c4f011..0e4ac0ac7 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -7,9 +7,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.9 - name: install latex @@ -30,20 +30,30 @@ jobs: pip install zeus-mcmc - pip install git+https://github.com/desihub/speclite.git - pip install black - + #pip install git+https://github.com/desihub/speclite.git + pip install --upgrade speclite + pip install "black<24" pip install astromodels - name: Install the package shell: bash -l {0} run: | + if [[ "${ISDEV}" == "true" ]]; then + pip install --upgrade --pre astromodels + #pip install git+https://github.com/threeML/astromodels.git@dev + else + pip install --upgrade astromodels + fi pip install -e . 
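+          # ISDEV (set in the env block below) is true when the pushed ref or
+          # the PR base ref contains "dev"; in that case the notebooks are
+          # built against the astromodels pre-release rather than the stable release.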
+ env: + ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Execute the notebooks shell: bash -l {0} run: | + # Make sure we fail in case of error + set -e # copy the doc configuration over mkdir -p ~/.config/threeML cp threeML/data/doc_config.yml ~/.config/threeML/ @@ -60,7 +70,7 @@ jobs: NUMEXPR_NUM_THREADS: 1 MPLBACKEND: "Agg" - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: notebooks-for-fast-${{ github.sha }} path: docs/notebooks @@ -68,12 +78,12 @@ jobs: hal_notebooks: name: "Build the HAL notebooks" - runs-on: macos-latest + runs-on: macos-12 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v4 env: # Increase this value to reset cache if etc/example-environment.yml has not changed CACHE_NUMBER: 0 @@ -82,7 +92,7 @@ jobs: key: conda-hal_notebooks-python-3.9-${{ hashFiles('ci/environment_hal.yml') }} - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true auto-activate-base: false @@ -96,8 +106,8 @@ jobs: - name: Init Environment shell: bash -l {0} run: | - set -e # Make sure we fail in case of error + set -e # miniconda_os=Linux # compilers="gcc_linux-64 gxx_linux-64 gfortran_linux-64" @@ -116,16 +126,23 @@ jobs: - name: Install the package shell: bash -l {0} run: | - - pip install -e . - - pip install black + pip install "black<24" + pip install --upgrade speclite pip install git+https://github.com/threeml/hawc_hal.git - + if [[ "${ISDEV}" == "true" ]]; then + pip install --upgrade --pre astromodels + #pip install git+https://github.com/threeML/astromodels.git@dev + else + pip install --upgrade astromodels + fi + pip install -e . + env: + ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Execute the notebooks shell: bash -l {0} run: | - + # Make sure we fail in case of error + set -e # copy the doc configuration over mkdir -p ~/.config/threeML cp threeML/data/doc_config.yml ~/.config/threeML/ @@ -139,19 +156,19 @@ jobs: NUMEXPR_NUM_THREADS: 1 MPLBACKEND: "Agg" - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: notebooks-for-hal-${{ github.sha }} path: docs/notebooks fermi_notebooks: name: "Build the Fermi notebooks" - runs-on: macos-latest + runs-on: macos-12 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v4 env: # Increase this value to reset cache if etc/example-environment.yml has not changed CACHE_NUMBER: 0 @@ -160,7 +177,7 @@ jobs: key: conda-fermi_notebooks-python-3.7-${{ hashFiles('ci/environment_fermi.yml') }} - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true auto-activate-base: false @@ -189,21 +206,28 @@ jobs: #sudo apt-get install texlive conda install ${compilers} jupytext jupyterthemes jupyter_latex_envs emcee pymultinest ultranest - - + - name: Install the package shell: bash -l {0} run: | - + pip install "black<24" fermipy + pip install --upgrade speclite + + if [[ "${ISDEV}" == "true" ]]; then + pip install --upgrade --pre astromodels + #pip install git+https://github.com/threeML/astromodels.git@dev + else + pip install --upgrade astromodels + fi pip install -e . 
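+          # astromodels is selected before the editable 3ML install so that
+          # "pip install -e ." presumably reuses the version chosen above
+          # instead of re-resolving it from the package requirements.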
- - pip install black - pip install fermipy + env: + ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Execute the notebooks shell: bash -l {0} run: | - + # Make sure we fail in case of error + set -e # copy the doc configuration over mkdir -p ~/.config/threeML cp threeML/data/doc_config.yml ~/.config/threeML/ @@ -211,15 +235,8 @@ jobs: jupytext --to ipynb --pipe black --execute docs/md_docs/slow_execute/Fermipy_LAT.md - # Update GtBurst: - #wget https://raw.githubusercontent.com/fermi-lat/pyBurstAnalysisGUI/master/python/GtBurst/updater.py -O $CONDA_PREFIX/lib/python3.7/site-packages/fermitools/GtBurst/updater.py - #python $CONDA_PREFIX/lib/python3.7/site-packages/fermitools/GtBurst/updater.py - jupytext --to ipynb --pipe black --execute docs/md_docs/slow_execute/LAT_Transient_Builder_Example.md - #jupytext --to py --pipe black docs/md_docs/slow_execute/LAT_Transient_Builder_Example.md - #python docs/md_docs/slow_execute/LAT_Transient_Builder_Example.py - #jupytext --to ipynb --pipe black --execute docs/md_docs/slow_execute/optimizer_docs.md mv docs/md_docs/slow_execute/*.ipynb docs/notebooks/ @@ -231,21 +248,19 @@ jobs: NUMEXPR_NUM_THREADS: 1 MPLBACKEND: "Agg" - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: notebooks-for-fermi-${{ github.sha }} path: docs/notebooks - - - + xspec_notebooks: name: "Build the XSPEC notebooks" - runs-on: macos-latest + runs-on: macos-12 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v4 env: # Increase this value to reset cache if etc/example-environment.yml has not changed CACHE_NUMBER: 0 @@ -254,7 +269,7 @@ jobs: key: conda-xspec_notebooks-python-3.9-${{ hashFiles('ci/environment_xspec.yml') }} - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true auto-activate-base: false @@ -283,28 +298,27 @@ jobs: #sudo apt-get install texlive mamba install ${compilers} jupytext jupyterthemes jupyter_latex_envs emcee pymultinest ultranest - - if [[ "${ISDEV}" == "true" ]]; then - - mamba install -c conda-forge -c threeml/label/dev "threeml/label/dev::astromodels" - - fi - env: - ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Install the package shell: bash -l {0} run: | - + pip install zeus_mcmc dynesty "black<24" + pip install --upgrade speclite + #pip install git+https://github.com/desihub/speclite.git + if [[ "${ISDEV}" == "true" ]]; then + pip install --upgrade --pre astromodels + #pip install git+https://github.com/threeML/astromodels.git@dev + else + pip install --upgrade astromodels + fi pip install -e . 
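+          # speclite is installed from PyPI here rather than from the desihub
+          # git checkout; the "matplotlib<3.9" pin used elsewhere in these
+          # workflows is presumably what makes the released speclite usable.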
- - pip install black - pip install git+https://github.com/desihub/speclite.git - + env: + ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Execute the notebooks shell: bash -l {0} run: | - + # Make sure we fail in case of error + set -e # copy the doc configuration over mkdir -p ~/.config/threeML cp threeML/data/doc_config.yml ~/.config/threeML/ @@ -315,26 +329,26 @@ jobs: mv docs/md_docs/slow_execute/*.ipynb docs/notebooks/ ls docs/notebooks - - + env: OMP_NUM_THREADS: 1 MKL_NUM_THREADS: 1 NUMEXPR_NUM_THREADS: 1 MPLBACKEND: "Agg" - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: notebooks-for-xspec-${{ github.sha }} path: docs/notebooks + multinest_notebooks: name: "Build the multinest notebooks" - runs-on: macos-latest + runs-on: macos-12 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v4 env: # Increase this value to reset cache if etc/example-environment.yml has not changed CACHE_NUMBER: 0 @@ -343,7 +357,7 @@ jobs: key: conda-hal_notebooks-python-3.7-${{ hashFiles('ci/environment_hal.yml') }} - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true auto-activate-base: false @@ -373,28 +387,29 @@ jobs: conda install ${compilers} jupytext jupyterthemes jupyter_latex_envs emcee ultranest - # temporary fix for fermi tools - - - name: Install the package shell: bash -l {0} run: | - - pip install -e . - pip install zeus_mcmc dynesty - pip install black + pip install zeus_mcmc dynesty "black<24" + pip install --upgrade speclite + #pip install git+https://github.com/desihub/speclite.git #pip install --no-binary :all: root_numpy - pip install git+https://github.com/desihub/speclite.git + if [[ "${ISDEV}" == "true" ]]; then - pip install --upgrade --pre --no-deps astromodels + pip install --upgrade --pre astromodels + #pip install git+https://github.com/threeML/astromodels.git@dev + else + pip install --upgrade astromodels fi + pip install -e . 
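+          # Unlike the earlier "--pre --no-deps" invocation, the pre-release
+          # install now pulls in astromodels' dependencies too, so any newly
+          # declared requirements are picked up by this notebook build.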
env: ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - name: Execute the notebooks shell: bash -l {0} run: | - + # Make sure we fail in case of error + set -e # copy the doc configuration over mkdir -p ~/.config/threeML cp threeML/data/doc_config.yml ~/.config/threeML/ @@ -419,7 +434,7 @@ jobs: NUMEXPR_NUM_THREADS: 1 MPLBACKEND: "Agg" - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: notebooks-for-multinest-${{ github.sha }} path: docs/notebooks @@ -429,11 +444,11 @@ jobs: upload_notebooks: needs: [fast_notebooks, fermi_notebooks, hal_notebooks, xspec_notebooks, multinest_notebooks] name: "Upload notebooks and trigger RTD" - runs-on: macos-latest + runs-on: macos-12 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - uses: actions/download-artifact@master @@ -466,22 +481,16 @@ jobs: run: | ls docs/notebooks - - - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: notebooks-for-${{ github.sha }} path: docs/notebooks - - - name: Sleep for 5 min + - name: Sleep for 10 min uses: juliangruber/sleep-action@v1 with: - time: 5m - - - + time: 10m - name: Trigger RTDs build uses: dfm/rtds-action@main @@ -493,15 +502,15 @@ jobs: api_doc: name: "Create the API stubs" - runs-on: macos-latest + runs-on: macos-12 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token fetch-depth: 0 # otherwise, you will failed to push refs to dest repo - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.9 @@ -511,15 +520,16 @@ jobs: brew install c-blosc brew install hdf5 - pip3 install cython tempita - pip3 install numpy scipy numba astropy + pip3 install cython blosc2 h5py + pip3 install numpy scipy numba astropy pandas + pip3 install wheel pkgconfig numexpr - python setup.py develop + #python setup.py develop + pip3 install -e . brew install sphinx-doc pandoc - pip3 install wheel pip3 install mock recommonmark pip3 install sphinx-rtd-dark-mode pip3 install -U sphinx @@ -528,7 +538,7 @@ jobs: sphinx-apidoc -f -o docs/api/ threeML - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: name: api-stubs-for-${{ github.sha }} path: docs/api @@ -538,18 +548,18 @@ jobs: build_docs: name: "Build the Documentation" - runs-on: macos-latest + runs-on: macos-12 needs: [upload_notebooks, api_doc] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token fetch-depth: 0 # otherwise, you will failed to push refs to dest repo - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.9 @@ -559,23 +569,29 @@ jobs: brew install c-blosc brew install hdf5 - pip3 install wheel + pip3 install wheel pkgconfig pip3 install tempita + pip3 install cython blosc2 h5py pip3 install numpy scipy numba astropy brew install sphinx-doc pandoc - pip3 install mock recommonmark pip3 install sphinx-rtd-dark-mode sphinx-math-dollar pip3 install -U sphinx pip3 install -r docs/requirements.txt - python3 setup.py develop + if [[ "${ISDEV}" == "true" ]]; then + pip install --upgrade --pre astromodels + #pip install git+https://github.com/threeML/astromodels.git@dev + else + pip install --upgrade astromodels + fi + pip install -e . 
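+          # "pip install -e ." is the editable-install equivalent of the
+          # removed "python setup.py develop" invocation.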
rm -rf docs/md_docs/* - - + env: + ISDEV: ${{contains(github.ref, 'dev') || contains(github.base_ref, 'dev')}} - uses: actions/download-artifact@master with: @@ -602,4 +618,4 @@ jobs: uses: ad-m/github-push-action@master with: github_token: ${{ secrets.GITHUB_TOKEN }} - branch: gh-pages + branch: gh-pages \ No newline at end of file diff --git a/.github/workflows/install_and_test.yml b/.github/workflows/install_and_test.yml index 5cf517d5f..e4af9b526 100644 --- a/.github/workflows/install_and_test.yml +++ b/.github/workflows/install_and_test.yml @@ -16,14 +16,14 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: XCode uses: maxim-lobanov/setup-xcode@v1 with: xcode-version: latest if: runner.os == 'macOS' - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true auto-activate-base: true @@ -53,14 +53,14 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: XCode uses: maxim-lobanov/setup-xcode@v1 with: xcode-version: latest if: runner.os == 'macOS' - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: auto-update-conda: true auto-activate-base: true diff --git a/.github/workflows/pip_install.yml b/.github/workflows/pip_install.yml index 85ea8edb7..3f02069ee 100644 --- a/.github/workflows/pip_install.yml +++ b/.github/workflows/pip_install.yml @@ -15,24 +15,33 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: XCode uses: maxim-lobanov/setup-xcode@v1 with: xcode-version: latest if: runner.os == 'macOS' - name: setup python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install base wheel run: | python -m pip install --upgrade pip wheel + if [[ ${{matrix.os}} == macos-latest ]]; + then + brew update + brew install hdf5 + # This installation of numpy is currently needed to avoid an issue + # while running pip install of astromodels + pip install numpy + fi python -m pip install astromodels - name: Install 3ML run: | python setup.py install + test-install-pip: strategy: fail-fast: false @@ -48,14 +57,94 @@ jobs: xcode-version: latest if: runner.os == 'macOS' - name: setup python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install base wheel run: | python -m pip install --upgrade pip wheel + if [[ ${{matrix.os}} == macos-latest ]]; + then + brew update + brew install hdf5 + # This installation of numpy is currently needed to avoid an issue + # while running pip install of astromodels + pip install numpy + fi python -m pip install astromodels - name: Install 3ML run: | pip install threeML + + test-install-conda: + strategy: + fail-fast: false + matrix: + os: ["ubuntu-latest", "macos-latest", "macos-12"] + python-version: [3.9] + runs-on: ${{ matrix.os }} + steps: + - name: XCode + uses: maxim-lobanov/setup-xcode@v1 + with: + xcode-version: latest + if: runner.os == 'macOS' + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v3 + with: + activate-environment: "test_env" + auto-activate-base: false + miniforge-variant: Mambaforge + python-version: ${{ matrix.python-version }} + auto-update-conda: true + channels: threeml, xspecmodels, fermi, conda-forge + - name: Install packages + 
run: | + mamba activate test_env + mamba install threeML astromodels xspec-modelsonly fermitools #fermipy + - name: Test threeML and astromodels + run: | + pytest -vv --pyargs astromodels + pytest -vv --pyargs threeML + env: + OMP_NUM_THREADS: 1 + MKL_NUM_THREADS: 1 + NUMEXPR_NUM_THREADS: 1 + MPLBACKEND: "Agg" + + test-install-conda-dev: + strategy: + fail-fast: false + matrix: + os: ["ubuntu-latest", "macos-latest", "macos-12"] + python-version: [3.9] + runs-on: ${{ matrix.os }} + steps: + - name: XCode + uses: maxim-lobanov/setup-xcode@v1 + with: + xcode-version: latest + if: runner.os == 'macOS' + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v3 + with: + activate-environment: "test_env" + auto-activate-base: false + miniforge-variant: Mambaforge + python-version: ${{ matrix.python-version }} + auto-update-conda: true + channels: threeml/label/dev, xspecmodels, fermi, conda-forge + - name: Install packages + run: | + mamba activate test_env + mamba install threeML astromodels xspec-modelsonly fermitools #fermipy + - name: Test threeML and astromodels + run: | + pytest -vv --pyargs astromodels + pytest -vv --pyargs threeML + env: + OMP_NUM_THREADS: 1 + MKL_NUM_THREADS: 1 + NUMEXPR_NUM_THREADS: 1 + MPLBACKEND: "Agg" \ No newline at end of file diff --git a/.github/workflows/test_against_xspec.yml b/.github/workflows/test_against_xspec.yml index 4fa45fbc9..71d7a752c 100644 --- a/.github/workflows/test_against_xspec.yml +++ b/.github/workflows/test_against_xspec.yml @@ -11,12 +11,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Python 3.9 - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.9 - name: Cache XPSEC - uses: actions/cache@v2 + uses: actions/cache@v4 id: cache-xspec with: path: ~/xspec_home @@ -80,7 +80,7 @@ jobs: fi - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: | export HEADAS=`ls -d ~/xspec_home/xspec-install/x86_64-pc-linux-gnu-libc*/`; @@ -108,12 +108,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Python 3.9 - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.9 - name: Cache XPSEC - uses: actions/cache@v2 + uses: actions/cache@v4 id: cache-xspec with: path: ~/xspec_home @@ -177,7 +177,7 @@ jobs: fi - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: | export HEADAS=`ls -d ~/xspec_home/xspec-install/x86_64-pc-linux-gnu-libc*/`; @@ -213,12 +213,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Python 3.9 - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.9 - name: Cache XPSEC - uses: actions/cache@v2 + uses: actions/cache@v4 id: cache-xspec with: path: ~/xspec_home @@ -282,7 +282,7 @@ jobs: fi - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install dependencies run: | export HEADAS=`ls -d ~/xspec_home/xspec-install/x86_64-pc-linux-gnu-libc*/`; @@ -318,11 +318,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Python 3.9 - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.9 - name: Add conda ${{ matrix.python-version }} to system path - uses: conda-incubator/setup-miniconda@v2 + uses: conda-incubator/setup-miniconda@v3 with: activate-environment: "test_env" auto-activate-base: false @@ -335,7 +335,7 @@ jobs: use-only-tar-bz2: true - name: Cache XPSEC - uses: actions/cache@v2 + uses: actions/cache@v4 id: cache-xspec with: path: ~/xspec_home @@ 
-398,7 +398,7 @@ jobs:
          rm -rf $XSPEC_BUILD_DIR ~/xspec_home/heasoft-src.tar.gz
        fi
    - name: Checkout
-     uses: actions/checkout@v2
+     uses: actions/checkout@v4
    - name: Install dependencies
      shell: bash -l {0}
      run: |
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 9ed2604ae..b3ed6dda9 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -1,7 +1,11 @@
 version: 2

+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.9"
+
 python:
-  version: 3.7
   install:
     - requirements: docs/requirements.txt
       method: pip
diff --git a/ci/environment.yml b/ci/environment.yml
index 5b9bfadc6..f3eb45668 100644
--- a/ci/environment.yml
+++ b/ci/environment.yml
@@ -2,9 +2,9 @@
 name: test_env
 channels:
   - threeml
-  - conda-forge
   - xspecmodels
   - fermi
+  - conda-forge
   - defaults
 dependencies:
   - pip
@@ -18,7 +18,7 @@ dependencies:
   - scipy>=1.4
   - emcee>=3
   - astropy
-  - matplotlib
+  - matplotlib<3.9
   - uncertainties
   - pyyaml>=5.1
   - dill
diff --git a/ci/environment_fermi.yml b/ci/environment_fermi.yml
index 0f0eca529..1301ed985 100644
--- a/ci/environment_fermi.yml
+++ b/ci/environment_fermi.yml
@@ -2,9 +2,9 @@
 name: test_env
 channels:
-  - conda-forge
   - threeml
   - fermi
+  - conda-forge
   - defaults
 dependencies:
   - pip
@@ -17,7 +17,7 @@ dependencies:
   - scipy>=1.4
   - emcee>=3
   - astropy
-  - matplotlib
+  - matplotlib<3.9
   - uncertainties
   - pyyaml>=5.1
   - dill
diff --git a/ci/environment_hal.yml b/ci/environment_hal.yml
index a5c80c245..1970dfcb5 100644
--- a/ci/environment_hal.yml
+++ b/ci/environment_hal.yml
@@ -2,8 +2,8 @@
 # when xspec is needed
 name: test_env
 channels:
-  - conda-forge
   - threeml
+  - conda-forge
   - defaults
 dependencies:
   - pip
@@ -16,7 +16,7 @@ dependencies:
   - scipy>=1.4
   - emcee>=3
   - astropy
-  - matplotlib
+  - matplotlib<3.9
   - uncertainties
   - pyyaml>=5.1
   - dill
@@ -50,3 +50,6 @@ dependencies:
   - awkward
   - hist
   - mplhep
+  - joblib
+  - h5py
+  - rich
\ No newline at end of file
diff --git a/ci/environment_noxspec.yml b/ci/environment_noxspec.yml
new file mode 100644
index 000000000..8715a5e36
--- /dev/null
+++ b/ci/environment_noxspec.yml
@@ -0,0 +1,49 @@
+# environment file for CI testing
+name: test_env
+channels:
+  - threeml
+  - fermi
+  - conda-forge
+  - defaults
+dependencies:
+  - pip
+  - pytest
+  - pytest-cov
+  - astromodels>=2
+  - codecov
+  - coverage
+  - setuptools
+  - numpy>=1.15
+  - scipy>=1.4
+  - emcee>=3
+  - astropy
+  - matplotlib<3.9
+  - uncertainties
+  - pyyaml>=5.1
+  - dill
+  - iminuit>=2
+  - astroquery
+  - corner>=1.0.2
+  - pandas>=0.23
+  - requests
+  - speclite
+  - dynesty>=1
+  - pygmo>=2.4
+  - ipywidgets
+  - ipython
+  - ipyparallel
+  - numba
+  - xz
+  - py
+  - numexpr
+  - ipopt
+  - numdifftools
+  - tqdm
+  - future
+  - fermitools>=2
+  - colorama
+  - omegaconf
+  - flake8
+  - h5py
+  - rich
+  - joblib
\ No newline at end of file
diff --git a/ci/environment_xspec.yml b/ci/environment_xspec.yml
index f815d6ee5..3f9df3fb2 100644
--- a/ci/environment_xspec.yml
+++ b/ci/environment_xspec.yml
@@ -9,7 +9,7 @@ channels:
 dependencies:
   - pip
   - astromodels>=2
-  - xspec-modelsonly
+  - xspec-modelsonly==6.30.1
   - pytest
   - pytest-cov
   - codecov
@@ -19,7 +19,7 @@ dependencies:
   - scipy>=1.4
   - emcee>=3
   - astropy
-  - matplotlib
+  - matplotlib<3.9
   - uncertainties
   - pyyaml>=5.1
   - dill
@@ -48,4 +48,7 @@ dependencies:
   - colorama
   - omegaconf
   - flake8
+  - h5py
+  - rich
+  - joblib
diff --git a/conda-dist/recipes/threeml/meta.yaml b/conda-dist/recipes/threeml/meta.yaml
index 5757c3542..3327d11b2 100644
--- a/conda-dist/recipes/threeml/meta.yaml
+++ b/conda-dist/recipes/threeml/meta.yaml
@@ -20,47 +20,35 @@ requirements:
   # - {{ 
compiler('cxx') }} # - {{ compiler('fortran') }} - python - #- setuptools - #- toolchain - numpy>=1.15 - #- scipy >=0.18 - emcee>=3 - #- astropy >=1.0.3 - #- matplotlib - uncertainties - pyyaml>=5.1 - dill - iminuit>=2 - astromodels>=2 - - astroquery<0.4 # [py2k] - - astroquery # [py3k] + - astroquery - corner>=1.0.2 - #- corner - #- pandas - requests - speclite - - multinest - - pymultinest - - ultranest # [py3k] + - multinest # [not arm64] + - pymultinest # [not arm64] + - ultranest # [not arm64] - dynesty>=1 - #- pygmo>=2.4,<=2.11.4 # [py2k] - pygmo>=2.4 - ipywidgets - numba - #- ipython - ipyparallel - py - #- tk==8.5.19 # [py2k] - - ipopt<3.13 # [py2k] - numdifftools - - interpolation>=2.1.5 # [py3k] + - interpolation>=2.1.5 - tqdm>=4.56.0 - colorama - omegaconf run: - python - - numpy>=1.15 # [py3k] + - numpy>=1.15 - scipy>=0.18 - emcee>=3 - astropy>=1.0.3 @@ -70,17 +58,15 @@ requirements: - dill - iminuit>=2 - astromodels>=2.3.8 - - astroquery<0.4 # [py2k] - - astroquery # [py3k] + - astroquery - corner>=1.0.2 - pandas>=0.23 - requests - speclite - - multinest - - pymultinest - - ultranest # [py3k] + - multinest # [not arm64] + - pymultinest # [not arm64] + - ultranest # [not arm64] - dynesty>=1 - #- pygmo>=2.4,<=2.11.4 # [py2k] - pygmo>=2.4 - ipywidgets - ipython @@ -88,10 +74,8 @@ requirements: - numba - xz - py - - pytest<4 # [py2k] - - pytest # [py3k] + - pytest - numexpr - - ipopt<3.13 # [py2k] - numdifftools - tqdm>=4.56.0 - omegaconf diff --git a/docs/environment.yml b/docs/environment.yml deleted file mode 100644 index 5a866a665..000000000 --- a/docs/environment.yml +++ /dev/null @@ -1,11 +0,0 @@ -name: threeML -channels: - - conda-forge/label/cf201901 - - conda-forge - - threeml - - defaults -dependencies: - - astromodels - - python=2.7 - - threeml - diff --git a/docs/index.rst b/docs/index.rst index 886c31a21..b421d5848 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -48,6 +48,7 @@ Though **Maximum Likelihood** is in the name for historical reasons, 3ML is an i notebooks/joint_fitting_xrt_and_gbm_xspec_models.ipynb notebooks/flux_examples.ipynb notebooks/Fermipy_LAT.ipynb + notebooks/LAT_Transient_Builder_Example.ipynb notebooks/Time-energy-fit.ipynb notebooks/synthetic_spectra.ipynb notebooks/gof_lrt.ipynb diff --git a/docs/md_docs/fast_execute/Quickstart.md b/docs/md_docs/fast_execute/Quickstart.md index 93e95037d..9a7882341 100644 --- a/docs/md_docs/fast_execute/Quickstart.md +++ b/docs/md_docs/fast_execute/Quickstart.md @@ -5,10 +5,10 @@ jupyter: text_representation: extension: .md format_name: markdown - format_version: '1.2' - jupytext_version: 1.7.1 + format_version: '1.3' + jupytext_version: 1.15.2 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -21,13 +21,11 @@ In this simple example we will generate some simulated data, and fit them with 3 Let's start by generating our dataset: -```python +```python import warnings warnings.simplefilter('ignore') import numpy as np np.seterr(all="ignore") - - ``` @@ -36,14 +34,12 @@ np.seterr(all="ignore") from threeML import * ``` -```python +```python from jupyterthemes import jtplot %matplotlib inline jtplot.style(context="talk", fscale=1, ticks=True, grid=False) silence_warnings() set_threeML_style() - - ``` @@ -74,7 +70,7 @@ fit_function = Powerlaw() xyl = XYLike("data", x, y, y_err) -parameters, like_values = xyl.fit(fit_function) +results = xyl.fit(fit_function) ``` Plot data and model: @@ -96,6 +92,9 @@ The procedure outlined above works for any 
distribution for the data (Gaussian o ```python import scipy.stats +# Retrieve the likelihood values +like_values = results.get_statistic_frame() + # Compute the number of degrees of freedom n_dof = len(xyl.x) - len(fit_function.free_parameters) diff --git a/docs/md_docs/fast_execute/faq.md b/docs/md_docs/fast_execute/faq.md index 4820a48ed..3fd1a3b10 100644 --- a/docs/md_docs/fast_execute/faq.md +++ b/docs/md_docs/fast_execute/faq.md @@ -5,10 +5,10 @@ jupyter: text_representation: extension: .md format_name: markdown - format_version: '1.2' - jupytext_version: 1.7.1 + format_version: '1.3' + jupytext_version: 1.15.2 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -59,7 +59,7 @@ print(fit_function.index) xyl = XYLike("data", x, y, y_err) -parameters, like_values = xyl.fit(fit_function) +results = xyl.fit(fit_function) print("power law index after fit:") @@ -77,7 +77,3 @@ When a plugin is created, it does not have a likelihood model set initially. Thi ## Why did my plugin lose its model? If you use the same plugin with different models bvy passing it to successive JointLikelihood or BayesianAnalysis constructors, the plugin will have the last model with which it was used set as its model. - -```python - -``` diff --git a/docs/md_docs/fast_execute/flux_examples.md b/docs/md_docs/fast_execute/flux_examples.md index 5170aed70..ddb2d1df4 100644 --- a/docs/md_docs/fast_execute/flux_examples.md +++ b/docs/md_docs/fast_execute/flux_examples.md @@ -6,9 +6,9 @@ jupyter: extension: .md format_name: markdown format_version: '1.3' - jupytext_version: 1.13.1 + jupytext_version: 1.15.2 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -27,10 +27,6 @@ Let's explore the possibilites. -```python - -``` - ```python import warnings warnings.simplefilter("ignore") @@ -221,7 +217,6 @@ threeML_config.point_source.integrate_flux_method = "quad" result.get_flux(ene_min=1*u.keV, ene_max = 1*u.MeV, - flux_unit="erg/cm2/s") ``` @@ -246,15 +241,15 @@ result.get_flux(ene_min=10*u.keV, flux_unit="1/(cm2 s)") ``` -As well as choose which component to compute +As well as choose which source and component to compute ```python result.get_flux(ene_min=10*u.keV, ene_max = 0.5*u.MeV, + sources=["src2"], use_components=True, components_to_use =["Blackbody"], - - flux_unit="erg2/(cm2 s)") + flux_unit="1/(cm2 s)") ``` Finally, the returned flux object is a pandas table and can be manipulated as such: diff --git a/docs/md_docs/slow_execute/Fermipy_LAT.md b/docs/md_docs/slow_execute/Fermipy_LAT.md index f4d183960..952c502e8 100644 --- a/docs/md_docs/slow_execute/Fermipy_LAT.md +++ b/docs/md_docs/slow_execute/Fermipy_LAT.md @@ -6,7 +6,7 @@ jupyter: extension: .md format_name: markdown format_version: '1.3' - jupytext_version: 1.14.1 + jupytext_version: 1.15.2 kernelspec: display_name: Python 3 (ipykernel) language: python @@ -189,7 +189,7 @@ We will fix the isotropic BG as we are not sensitive to it with this dataset. 
We ```python model.LAT_isodiff_Normalization.fix = True -model._4FGL_J0544d4p2238.spectrum.main.Powerlaw.K.fix = True +model.x4FGL_J0544d4p2238.spectrum.main.Powerlaw.K.fix = True model.display() ``` @@ -225,7 +225,7 @@ Or we might want to produce a contour plot ```python res = jl.get_contours( - 'PSR_J0534p2200.spectrum.main.Super_cutoff_powerlaw.K',1.6e-13,2.2e-13, 20, + 'PSR_J0534p2200.spectrum.main.Super_cutoff_powerlaw.K',2.1e-13,2.7e-13, 20, 'PSR_J0534p2200.spectrum.main.Super_cutoff_powerlaw.index',-2.0,-1.7, 20 ) ``` diff --git a/docs/md_docs/slow_execute/LAT_Transient_Builder_Example.md b/docs/md_docs/slow_execute/LAT_Transient_Builder_Example.md index 353b51b5a..ab581df90 100644 --- a/docs/md_docs/slow_execute/LAT_Transient_Builder_Example.md +++ b/docs/md_docs/slow_execute/LAT_Transient_Builder_Example.md @@ -1,3 +1,5 @@ +# Analysis of GRB 190114C with Fermi-LAT + ```python import matplotlib.pyplot as plt @@ -109,18 +111,30 @@ zmax = 110.0 thetamax = 180.0 irfs = "p8_transient020e" strategy = "time" -myLATdataset.extract_events(roi, zmax, irfs, thetamax, strategy="time") +myLATdataset.extract_events(roi, zmax, irfs, thetamax, strategy=strategy) ``` ```python %matplotlib inline -event_file = pyfits.open(myLATdataset.filt_file) -event_times = sorted(event_file["EVENTS"].data["TIME"] - myGRB["MET"]) -intervals = event_times[0::10] -_ = plt.hist(event_times) -_ = plt.hist(event_times, intervals, histtype="step") -# plt.show() +with pyfits.open(myLATdataset.filt_file) as event_file: + lat_events = event_file["EVENTS"].data +event_times = lat_events["TIME"] - myGRB["MET"] + +intervals = [0, 10, 30, 80, 180] +fig, axs = plt.subplots(2, 1, sharex=True) +plt.sca(axs[0]) +plt.hist(event_times); +plt.hist(event_times, intervals, histtype="step"); +plt.ylabel('Events') +plt.sca(axs[1]) +plt.scatter(event_times, lat_events['ENERGY'], marker='o', c=lat_events['ENERGY'], norm='log', + alpha=0.5, zorder=20) +plt.yscale('log') +plt.ylabel('Energy [MeV]') +plt.xlabel('Time - T0 [s]') +plt.grid(True) ``` + tstarts and tstops are defined as strings, with somma separated values for the starts and the ends of the time bins: For example tsrats="0,1,10" and tstops="1,10,20". To convert arrays in string we use these few lines of code: @@ -131,7 +145,9 @@ for t0, t1 in zip(intervals[:-1], intervals[1:]): tstops += "%.4f," % t1 pass tstarts = tstarts[:-1].replace("-", "\\-") +print(tstarts) tstops = tstops[:-1].replace("-", "\\-") +print(tstops) ``` We can now make an instance the LAT transient builder @@ -153,9 +169,6 @@ analysis_builder = TransientLATDataBuilder( df = analysis_builder.display(get=True) ``` -```python -tstops -``` The run method will run (using gtburst) all the fermitools needed to obtain the needed file for the likelihood analysis (livetimecubes, exposure maps. It will also perfom a simple likelihood analysis with the standard likelihood of the fermitools (pylikelihood). The dataproducts created here will be used by threeML to make the fit. @@ -184,7 +197,7 @@ Now we can perform the fit in each bin. 
Note that we set the model, and we set s ```python results = {} -update_logging_level("DEBUG") +#update_logging_level("DEBUG") for T0, T1 in zip(intervals[:-1], intervals[1:]): GRB = PointSource( @@ -215,13 +228,13 @@ for T0, T1 in zip(intervals[:-1], intervals[1:]): You can usethis function to graphically display the results of your fit (folded model, data and residuals) -```python -i = 3 +```python tags=["nbsphinx-thumbnail"] +i = 0 T0, T1 = intervals[i], intervals[i + 1] LAT_name = "LAT_%06.3f-%06.3f" % (T0, T1) jl = results[LAT_name] jl.results.display() -display_spectrum_model_counts(jl, step=False, figsize=(10, 10)) +display_spectrum_model_counts(jl, figsize=(10,8)); ``` We can see the evolution of the spectrum with time (not all the bins are diplayed): @@ -229,7 +242,7 @@ We can see the evolution of the spectrum with time (not all the bins are diplaye ```python fig = plot_spectra( - *[results[k].results for k in list(results.keys())[::2]], + *[a.results for a in results.values()], ene_min=100 * u.MeV, ene_max=10 * u.GeV, flux_unit="erg2/(cm2 s MeV)", @@ -238,7 +251,7 @@ fig = plot_spectra( contour_cmap="viridis", contour_style_kwargs=dict(alpha=0.1) ) -fig.set_size_inches(10, 10) +fig.set_size_inches(10, 8) ``` @@ -255,7 +268,6 @@ for n in variates: x = [] dx = [] - for T0, T1 in zip(intervals[:-1], intervals[1:]): LAT_name = "LAT_%06.3f-%06.3f" % (T0, T1) x.append((T1 + T0) / 2) @@ -270,10 +282,8 @@ for T0, T1 in zip(intervals[:-1], intervals[1:]): y[n].append(my_variate.median) y[n + "_p"].append(my_variate.equal_tail_interval()[1] - my_variate.median) y[n + "_n"].append(my_variate.median - my_variate.equal_tail_interval()[0]) - pass - pass -fig = plt.figure(figsize=(10, 15)) +fig = plt.figure(figsize=(8, 12)) colors = ["r", "b"] ylabels = ["Flux [100MeV - 10GeV] \n $\gamma$ cm$^{-2}$ s$^{-1}$", "index"] for i, n in enumerate(variates): @@ -281,7 +291,5 @@ for i, n in enumerate(variates): plt.errorbar(x, y[n], xerr=dx, yerr=(y[n + "_n"], y[n + "_p"]), ls="", c=colors[i]) if i == 0: plt.yscale("log") - # plt.xscale('log') plt.ylabel(ylabels[i]) - pass ``` diff --git a/docs/requirements.txt b/docs/requirements.txt index e7050f111..c9a5744db 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -10,3 +10,5 @@ numpy pandas numba sphinx_rtd_dark_mode +mock +recommonmark \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index fa39daaa8..6a0654bab 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,7 +10,7 @@ url = https://github.com/threeml/threeML author_email = giacomo.vianello@gmail.com author = Giacomo Vianello -requires_python = >=3.7.0 +requires_python = >=3.9.0 project_urls = @@ -23,7 +23,7 @@ classifiers = Topic :: Scientific/Engineering :: Astronomy Intended Audience :: Science/Research Operating System :: POSIX - Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.9 Environment :: Console [options] @@ -33,8 +33,8 @@ install_requires = numpy>=1.16 scipy>=1.4 emcee>=3 - astropy>=1.3.3 - matplotlib + astropy + matplotlib<3.9 uncertainties pyyaml>=5.1 dill diff --git a/threeML/__init__.py b/threeML/__init__.py index 69e95b580..f54371622 100644 --- a/threeML/__init__.py +++ b/threeML/__init__.py @@ -22,6 +22,10 @@ from pathlib import Path from threeML.io.logging import setup_logger + +# Import everything from astromodels +from astromodels import * + from .config import ( threeML_config, show_configuration, @@ -62,10 +66,6 @@ import traceback from importlib.machinery import SourceFileLoader -# Import everything from astromodels -from 
astromodels import * - - # Finally import the serialization machinery from .io.serialization import * diff --git a/threeML/analysis_results.py b/threeML/analysis_results.py index db6bae383..3444b2a63 100644 --- a/threeML/analysis_results.py +++ b/threeML/analysis_results.py @@ -14,7 +14,7 @@ import astropy.units as u import h5py import matplotlib.pyplot as plt -import matplotlib.cm as cm +from matplotlib import colormaps import numpy as np import pandas as pd import yaml @@ -234,9 +234,8 @@ def __init__(self, analysis_results, hdf_obj): hdf_obj.create_dataset( "UNIT", - data=np.array(data_frame["unit"].values, dtype=np.unicode_).astype( - h5py.string_dtype() - ), + data=np.array(data_frame["unit"].values, dtype=np.str_).astype( + h5py.string_dtype()), compression="gzip", compression_opts=9, shuffle=True, @@ -395,7 +394,7 @@ def __init__(self, analysis_results): ("NEGATIVE_ERROR", data_frame["negative_error"].values), ("POSITIVE_ERROR", data_frame["positive_error"].values), ("ERROR", data_frame["error"].values), - ("UNIT", np.array(data_frame["unit"].values, np.unicode_)), + ("UNIT", np.array(data_frame["unit"].values, np.str_)), ("COVARIANCE", covariance_matrix), ("LOG_PROB", dummy), ("SAMPLES", samples), @@ -1125,7 +1124,7 @@ def corner_plot( corner_style = threeML_config.bayesian.corner_style - cmap = cm.get_cmap(corner_style.cmap.value) + cmap = colormaps[corner_style.cmap.value] try: cmap.with_extremes( diff --git a/threeML/bayesian/ultranest_sampler.py b/threeML/bayesian/ultranest_sampler.py index bbd00d475..7a9f071ba 100644 --- a/threeML/bayesian/ultranest_sampler.py +++ b/threeML/bayesian/ultranest_sampler.py @@ -133,15 +133,10 @@ def sample(self, quiet=False): param_names = list(self._free_parameters.keys()) - - chain_name = self._kwargs.pop('log_dir') - loglike, ultranest_prior = self._construct_unitcube_posterior(return_copy=True) # UltraNest must be run parallel via an external method - loglike, ultranest_prior = self._construct_unitcube_posterior(return_copy=True) - # We need to check if the MCMC # chains will have a place on # the disk to write and if not, diff --git a/threeML/catalogs/FermiLAT.py b/threeML/catalogs/FermiLAT.py index 30a0d7bc9..6d8d7f417 100644 --- a/threeML/catalogs/FermiLAT.py +++ b/threeML/catalogs/FermiLAT.py @@ -73,7 +73,7 @@ def _get_vo_table_from_source(self): def _source_is_valid(self, source): """ - checks if source name is valid for the 3FGL catalog + checks if source name is valid for the 4FGL catalog :param source: source name :return: bool @@ -140,7 +140,14 @@ def translate(key): return new_table.group_by("name") - def get_model(self, use_association_name=True): + def get_model(self, use_association_name=True, exposure=None, npred_min=0): + ''' + Build the model with FGL sources + :param use_association_name: use the name of the associated source (stored in assoc_name column) + :param exposure: exposure in cm2 * seconds (can be calculated with gtexposure) + :param npred_min: minimum number of predicted events. 
+ :return: model + ''' assert ( self._last_query_results is not None @@ -150,7 +157,11 @@ def get_model(self, use_association_name=True): sources = [] source_names = [] for name, row in self._last_query_results.T.items(): - + if exposure is not None: + npred = row['flux_1_100_gev'] * exposure + log.debug('Source %s npred= %.1e' % (name, npred) ) + if npred < npred_min: + continue # If there is an association and use_association is True, use that name, otherwise the 3FGL name if row["assoc_name"] != "" and use_association_name: diff --git a/threeML/catalogs/catalog_utils.py b/threeML/catalogs/catalog_utils.py index 77ac961c3..573950da2 100644 --- a/threeML/catalogs/catalog_utils.py +++ b/threeML/catalogs/catalog_utils.py @@ -113,6 +113,10 @@ def _get_point_source_from_fgl(fgl_name, catalog_entry, fix=False): this_spectrum.K = float(catalog_entry["lp_flux_density"]) / ( u.cm ** 2 * u.s * u.MeV ) + this_spectrum.K.bounds = ( + this_spectrum.K.value / 1000.0, + this_spectrum.K.value * 1000, + ) else: K = float(catalog_entry["flux_density"]) this_spectrum.K.bounds = (K / 1000.0, K * 1000) @@ -183,14 +187,16 @@ def _get_point_source_from_fgl(fgl_name, catalog_entry, fix=False): this_spectrum.index = -i this_spectrum.gamma = b this_spectrum.piv = E0 * u.MeV + this_spectrum.K = ( conv * K / (u.cm ** 2 * u.s * u.MeV)) - this_spectrum.xc = a ** (-1.0 / b ) * u.MeV - this_spectrum.K.fix = fix this_spectrum.K.bounds = ( this_spectrum.K.value / 1000.0, this_spectrum.K.value * 1000, ) + this_spectrum.xc = a ** (-1.0 / b ) * u.MeV + + this_spectrum.K.fix = fix this_spectrum.xc.fix = fix this_spectrum.index.fix = fix this_spectrum.gamma.fix = fix diff --git a/threeML/classicMLE/joint_likelihood.py b/threeML/classicMLE/joint_likelihood.py index 09fa6cd51..5539c780c 100644 --- a/threeML/classicMLE/joint_likelihood.py +++ b/threeML/classicMLE/joint_likelihood.py @@ -5,7 +5,7 @@ from builtins import object, range, zip import astromodels.core.model -import matplotlib.cm as cm +from matplotlib import colormaps import matplotlib.pyplot as plt import numpy as np import pandas as pd @@ -225,6 +225,7 @@ def fit( :param quiet: If True, print the results (default), otherwise do not print anything :param compute_covariance:If True (default), compute and display the errors and the correlation matrix. + :param n_samples: Number of samples to scan the likelihood. :return: a dictionary with the results on the parameters, and the values of the likelihood at the minimum for each dataset and the total one. """ @@ -382,7 +383,8 @@ def fit( statistical_measures["BIC"] = bic( -total, len(self._free_parameters), total_number_of_data_points ) - + log.debug('likelihood: %.f , Free Parameters: %d, Total number of datapoints: %d' % + (-total, len(self._free_parameters), total_number_of_data_points)) #Workaround for the case of a "fit" with no free parameters #This happens e.g. if you calculate the TS of the only source #in a one-source model. 
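+        # The standard Bayesian information criterion computed above is
+        #     BIC = k * ln(n) - 2 * ln(L_max)
+        # with k free parameters, n data points, and ln(L_max) = -total
+        # (log-likelihood values in this module carry the opposite sign,
+        # as the debug line above shows).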
@@ -1254,8 +1256,7 @@ def _plot_contours(self, name1, a, name2, b, cc): bounds.append(cc.max()) # Define the color palette - palette = cm.get_cmap( - threeML_config["mle"]["contour_cmap"].value) # cm.Pastel1 + palette = colormaps[threeML_config["mle"]["contour_cmap"].value] # cm.Pastel1 palette.set_over(threeML_config["mle"]["contour_background"]) palette.set_under(threeML_config["mle"]["contour_background"]) palette.set_bad(threeML_config["mle"]["contour_background"]) @@ -1304,6 +1305,8 @@ def compute_TS(self, source_name, alt_hyp_mlike_df): # Remove this source from the model _ = model_clone.remove_source(source_name) + #import copy + #data_list_clone = copy.deepcopy(self._data_list) # Fit another_jl = JointLikelihood(model_clone, self._data_list) diff --git a/threeML/config/catalog_structure.py b/threeML/config/catalog_structure.py index 3e3cd2c6a..8a46c7510 100644 --- a/threeML/config/catalog_structure.py +++ b/threeML/config/catalog_structure.py @@ -1,9 +1,7 @@ from dataclasses import dataclass, field -from enum import Enum, Flag -from typing import Any, Dict, List, Optional +from typing import Dict, Optional -import matplotlib.pyplot as plt -from omegaconf import II, MISSING, SI, OmegaConf +from omegaconf import MISSING @dataclass(frozen=True) @@ -22,14 +20,16 @@ class CatalogServer: class InstrumentCatalog: catalogs: Dict[str, CatalogServer] = MISSING - @dataclass(frozen=True) class Catalogs: - Fermi: InstrumentCatalog = InstrumentCatalog({"LAT FGL": CatalogServer("https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermilpsc&"), - "GBM burst catalog": CatalogServer("https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermigbrst&"), - "GBM trigger catalog": CatalogServer("https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermigtrig&"), - "LLE catalog": CatalogServer("https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermille&") - }) - - Swift: InstrumentCatalog = InstrumentCatalog({"Swift GRB catalog": CatalogServer( - "https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=swiftgrb&")}) + Fermi: InstrumentCatalog = field(default_factory=lambda: InstrumentCatalog( + {"LAT FGL": CatalogServer("https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermilpsc&"), + "GBM burst catalog": CatalogServer( + "https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermigbrst&"), + "GBM trigger catalog": CatalogServer( + "https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermigtrig&"), + "LLE catalog": CatalogServer("https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermille&") + })) + + Swift: InstrumentCatalog = field(default_factory=lambda: InstrumentCatalog({"Swift GRB catalog": CatalogServer( + "https://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=swiftgrb&")})) diff --git a/threeML/config/config_structure.py b/threeML/config/config_structure.py index cf396bc42..02d280a38 100644 --- a/threeML/config/config_structure.py +++ b/threeML/config/config_structure.py @@ -1,5 +1,5 @@ import logging -from dataclasses import dataclass +from dataclasses import dataclass, field from enum import IntEnum from .catalog_structure import Catalogs, PublicDataServer @@ -8,6 +8,7 @@ from .plugin_structure import Plugins, TimeSeries from .point_source_structure import PointSourceDefaults + # logging class LoggingLevel(IntEnum): DEBUG = logging.DEBUG @@ -19,13 +20,12 @@ class LoggingLevel(IntEnum): @dataclass class Logging: - path: str = "~/.threeml/log" - developer: bool = 'off' - usr: bool = 'on' - 
console: bool = 'on'
+    developer: bool = "off"
+    usr: bool = "on"
+    console: bool = "on"
     level: LoggingLevel = LoggingLevel.INFO
-    startup_warnings: bool = 'on'
+    startup_warnings: bool = "on"

 @dataclass
@@ -34,30 +34,31 @@ class Parallel:
     use_parallel: bool = False
     use_joblib: bool = False

+
 @dataclass
 class Interface:
-    progress_bars: bool = 'on'
-    multi_progress_color: bool = 'on'
+    progress_bars: bool = "on"
+    multi_progress_color: bool = "on"
     multi_progress_cmap: str = "viridis"
     progress_bar_color: str = "#9C04FF"

 @dataclass
 class Config:
-    logging: Logging = Logging()
-    parallel: Parallel = Parallel()
-    interface: Interface = Interface()
-    plugins: Plugins = Plugins()
-    time_series: TimeSeries = TimeSeries()
-    mle: MLEDefault = MLEDefault()
-    bayesian: BayesianDefault = BayesianDefault()
-    plotting: GenericPlotting = GenericPlotting()
-    model_plot: ModelPlotting = ModelPlotting()
-    point_source: PointSourceDefaults = PointSourceDefaults()
+    logging: Logging = field(default_factory=lambda: Logging())
+    parallel: Parallel = field(default_factory=lambda: Parallel())
+    interface: Interface = field(default_factory=lambda: Interface())
+    plugins: Plugins = field(default_factory=lambda: Plugins())
+    time_series: TimeSeries = field(default_factory=lambda: TimeSeries())
+    mle: MLEDefault = field(default_factory=lambda: MLEDefault())
+    bayesian: BayesianDefault = field(default_factory=lambda: BayesianDefault())
+    plotting: GenericPlotting = field(default_factory=lambda: GenericPlotting())
+    model_plot: ModelPlotting = field(default_factory=lambda: ModelPlotting())
+    point_source: PointSourceDefaults = field(default_factory=lambda: PointSourceDefaults())

-    LAT: PublicDataServer = PublicDataServer(public_ftp_location="ftp://heasarc.nasa.gov/fermi/data",
+    LAT: PublicDataServer = field(default_factory=lambda: PublicDataServer(public_ftp_location="ftp://heasarc.nasa.gov/fermi/data",
                                              public_http_location="https://heasarc.gsfc.nasa.gov/FTP/fermi/data/lat",
-                                             query_form="https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi")
-    GBM: PublicDataServer = PublicDataServer(public_ftp_location="ftp://heasarc.nasa.gov/fermi/data",
-                                             public_http_location="https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm")
-    catalogs: Catalogs = Catalogs()
+                                             query_form="https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi"))
+    GBM: PublicDataServer = field(default_factory=lambda: PublicDataServer(public_ftp_location="ftp://heasarc.nasa.gov/fermi/data",
+                                             public_http_location="https://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm"))
+    catalogs: Catalogs = field(default_factory=lambda: Catalogs())
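Note: the field(default_factory=...) rewrites in this hunk, and the matching ones in the config modules below, are what keep these omegaconf dataclasses importable on Python 3.11+: a dataclass instance used directly as a field default is unhashable (eq=True, frozen=False) and is rejected as a mutable default. A standalone sketch of the failure mode and the fix, reusing the Logging/Config names from this file:

from dataclasses import dataclass, field

@dataclass
class Logging:
    level: str = "INFO"

# On Python >= 3.11 this raises, roughly:
#   ValueError: mutable default <class 'Logging'> for field logging is not
#   allowed: use default_factory
#
#     @dataclass
#     class Config:
#         logging: Logging = Logging()

@dataclass
class Config:
    # the pattern adopted throughout this patch; a bare default_factory=Logging
    # would also work, the lambda form is just what the patch uses
    logging: Logging = field(default_factory=lambda: Logging())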
diff --git a/threeML/config/fitting_structure.py b/threeML/config/fitting_structure.py
index 334831e30..ffdddbcb8 100644
--- a/threeML/config/fitting_structure.py
+++ b/threeML/config/fitting_structure.py
@@ -1,10 +1,8 @@
 from dataclasses import dataclass, field
-from enum import Enum, Flag
-from typing import Any, Dict, List, Optional
+from enum import Enum
+from typing import Any, Dict, Optional

 import numpy as np
-import matplotlib.pyplot as plt
-from omegaconf import II, MISSING, SI, OmegaConf

 from .plotting_structure import CornerStyle, MPLCmap

@@ -19,7 +17,7 @@ class Sampler(Enum):
     autoemcee = "autoemcee"

-_sampler_default = {'emcee': {'n_burnin': 1}}
+_sampler_default = {"emcee": {"n_burnin": 1}}

 class Optimizer(Enum):
@@ -30,97 +28,102 @@ class BayesianDefault:
-    use_median_fit: bool = False

     default_sampler: Sampler = Sampler.emcee

     emcee_setup: Optional[Dict[str, Any]] = field(
-        default_factory=lambda: {'n_burnin': None,
-                                 'n_iterations': 500,
-                                 "n_walkers": 50,
-                                 "seed": 5123})
-
-    multinest_setup: Optional[Dict[str, Any]] = field(
-        default_factory=lambda: {'n_live_points': 400,
-                                 'chain_name': "chains/fit-",
-                                 "resume": False,
-                                 "importance_nested_sampling": False,
-                                 "auto_clean": False,
-
+        default_factory=lambda: {
+            "n_burnin": None,
+            "n_iterations": 500,
+            "n_walkers": 50,
+            "seed": 5123,
+        }
+    )

-                                 })
+    multinest_setup: Optional[Dict[str, Any]] = field(
+        default_factory=lambda: {
+            "n_live_points": 400,
+            "chain_name": "chains/fit-",
+            "resume": False,
+            "importance_nested_sampling": False,
+            "auto_clean": False,
+        }
+    )

     ultranest_setup: Optional[Dict[str, Any]] = field(
-        default_factory=lambda: { "min_num_live_points":400,
-                                  "dlogz":0.5,
-                                  "dKL": 0.5,
-                                  "frac_remain": 0.01,
-                                  "Lepsilon": 0.001,
-                                  "min_ess": 400,
-                                  "update_interval_volume_fraction":0.8,
-                                  "cluster_num_live_points":40,
-                                  "use_mlfriends": True,
-                                  "resume": 'overwrite' }
+        default_factory=lambda: {
+            "min_num_live_points": 400,
+            "dlogz": 0.5,
+            "dKL": 0.5,
+            "frac_remain": 0.01,
+            "Lepsilon": 0.001,
+            "min_ess": 400,
+            "update_interval_volume_fraction": 0.8,
+            "cluster_num_live_points": 40,
+            "use_mlfriends": True,
+            "resume": "overwrite",
+        }
     )

     zeus_setup: Optional[Dict[str, Any]] = field(
-        default_factory=lambda: {'n_burnin': None,
-                                 'n_iterations': 500,
-                                 "n_walkers": 50,
-                                 "seed": 5123})
-
+        default_factory=lambda: {
+            "n_burnin": None,
+            "n_iterations": 500,
+            "n_walkers": 50,
+            "seed": 5123,
+        }
+    )
-
     dynesty_nested_setup: Optional[Dict[str, Any]] = field(
-        default_factory=lambda: { "n_live_points": 400,
-                                  "maxiter": None,
-                                  "maxcall": None,
-                                  "dlogz": None,
-                                  "logl_max": np.inf,
-                                  "n_effective": None,
-                                  "add_live": True,
-                                  "print_func": None,
-                                  "save_bounds":True,
-                                  "bound":"multi",
-                                  "wrapped_params": None,
-                                  "sample": "auto",
-                                  "periodic": None,
-                                  "reflective": None,
-                                  "update_interval": None,
-                                  "first_update": None,
-                                  "npdim": None,
-                                  "rstate": None,
-                                  "use_pool": None,
-                                  "live_points": None,
-                                  "logl_args": None,
-                                  "logl_kwargs": None,
-                                  "ptform_args": None,
-                                  "ptform_kwargs": None,
-                                  "gradient": None,
-                                  "grad_args": None,
-                                  "grad_kwargs": None,
-                                  "compute_jac": False,
-                                  "enlarge": None,
-                                  "bootstrap": 0,
-                                  "vol_dec": 0.5,
-                                  "vol_check": 2.0,
-                                  "walks": 25,
-                                  "facc": 0.5,
-                                  "slices": 5,
-                                  "fmove": 0.9,
-                                  "max_move": 100,
-                                  "update_func": None,
-
-
-        })
+        default_factory=lambda: {
+            "n_live_points": 400,
+            "maxiter": None,
+            "maxcall": None,
+            "dlogz": None,
+            "logl_max": np.inf,
+            "n_effective": None,
+            "add_live": True,
+            "print_func": None,
+            "save_bounds": True,
+            "bound": "multi",
+            "wrapped_params": None,
+            "sample": "auto",
+            "periodic": None,
+            "reflective": None,
+            "update_interval": None,
+            "first_update": None,
+            "npdim": None,
+            "rstate": None,
+            "use_pool": None,
+            "live_points": None,
+            "logl_args": None,
+            "logl_kwargs": None,
+            "ptform_args": None,
+            "ptform_kwargs": None,
+            "gradient": None,
+            "grad_args": None,
+            "grad_kwargs": None,
+            "compute_jac": False,
+            "enlarge": None,
+            "bootstrap": 0,
+            "vol_dec": 0.5,
+            "vol_check": 2.0,
+            "walks": 25,
+            "facc": 0.5,
+            "slices": 5,
+            "fmove": 0.9,
+            "max_move": 100,
+            "update_func": None,
+        }
+    )

     dynesty_dynmaic_setup: Optional[Dict[str, Any]] = field(
         default_factory=lambda: {
             "nlive_init": 500,
             "maxiter_init": None,
             "maxcall_init": None,
-            "dlogz_init": 0.01, 
+            "dlogz_init": 0.01,
             "logl_max_init": np.inf,
             "n_effective_init": np.inf,
             "nlive_batch": 500,
@@ -138,9 +141,9 @@ class BayesianDefault:
             "save_bounds": True,
"print_func": None, "live_points": None, - "bound":"multi", + "bound": "multi", "wrapped_params": None, - "sample":"auto", + "sample": "auto", "periodic": None, "reflective": None, "update_interval": None, @@ -166,13 +169,10 @@ class BayesianDefault: "fmove": 0.9, "max_move": 100, "update_func": None, - - - }) - - - - corner_style: CornerStyle =CornerStyle() + } + ) + + corner_style: CornerStyle = field(default_factory=lambda: CornerStyle()) @@ -182,12 +182,12 @@ class MLEDefault: default_minimizer_algorithm: Optional[str] = None default_minimizer_callback: Optional[str] = None contour_cmap: MPLCmap = MPLCmap.Pastel1 - contour_background: str = 'white' - contour_level_1: str = '#ffa372' - contour_level_2: str = '#ed6663' - contour_level_3: str = '#0f4c81' - profile_color: str = 'k' - - profile_level_1: str = '#ffa372' - profile_level_2: str = '#ed6663' - profile_level_3: str = '#0f4c81' + contour_background: str = "white" + contour_level_1: str = "#ffa372" + contour_level_2: str = "#ed6663" + contour_level_3: str = "#0f4c81" + profile_color: str = "k" + + profile_level_1: str = "#ffa372" + profile_level_2: str = "#ed6663" + profile_level_3: str = "#0f4c81" diff --git a/threeML/config/plotting_structure.py b/threeML/config/plotting_structure.py index 9c55cca34..89b5b0a0a 100644 --- a/threeML/config/plotting_structure.py +++ b/threeML/config/plotting_structure.py @@ -1,9 +1,8 @@ from dataclasses import dataclass, field -from enum import Enum, Flag +from enum import Enum from typing import Any, Dict, List, Optional -import numpy as np + import matplotlib.pyplot as plt -from omegaconf import II, MISSING, SI, OmegaConf # type checking matplotlib colormaps MPLCmap = Enum("MPLCmap", zip(plt.colormaps(), plt.colormaps())) @@ -27,6 +26,28 @@ class BinnedSpectrumPlot: background_mpl_kwargs: Optional[Dict[str, Any]] = None +@dataclass +class FermiSpectrumPlot: + total_model_color: str = "k" + show_background_sources: bool = True + shade_fixed_sources: bool = True + shade_secondary_sources: bool = False + fixed_sources_color: str = "grey" + secondary_sources_color: str = "k" + data_cmap: MPLCmap = MPLCmap.Set1 + model_cmap: MPLCmap = MPLCmap.Set1 + background_cmap: MPLCmap = MPLCmap.Set1 + step: bool = False + show_legend: bool = True + show_residuals: bool = True + data_color: Optional[str] = None + model_color: Optional[str] = None + background_color: Optional[str] = None + data_mpl_kwargs: Optional[Dict[str, Any]] = None + model_mpl_kwargs: Optional[Dict[str, Any]] = None + background_mpl_kwargs: Optional[Dict[str, Any]] = None + + @dataclass class DataHistPlot: counts_color: str = "#500472" @@ -39,8 +60,7 @@ class DataHistPlot: @dataclass class PlotStyle: linestyle: Optional[str] = "-" - linewidth: Optional[float] = 1.7 - + linewidth: Optional[float]= 1.7 @dataclass class ContourStyle: @@ -80,14 +100,13 @@ class LegendStyle: @dataclass class PointSourcePlot: - fit_cmap: MPLCmap = MPLCmap.Set1 contour_cmap: MPLCmap = MPLCmap.Set1 bayes_cmap: MPLCmap = MPLCmap.Set1 - plot_style: PlotStyle = PlotStyle() - contour_style: ContourStyle = ContourStyle() + plot_style: PlotStyle = field(default_factory=lambda: PlotStyle()) + contour_style: ContourStyle = field(default_factory=lambda: ContourStyle()) show_legend: bool = True - legend_style: LegendStyle = LegendStyle() + legend_style: LegendStyle = field(default_factory=lambda: LegendStyle()) flux_unit: str = "1/(keV s cm2)" emin: float = 10.0 emax: float = 1e4 @@ -107,10 +126,10 @@ class ResidualPlot: class GenericPlotting: use_threeml_style: bool = True 
mplstyle: str = "threeml.mplstyle" - residual_plot: ResidualPlot = ResidualPlot() + residual_plot: ResidualPlot = field(default_factory=lambda: ResidualPlot()) @dataclass class ModelPlotting: - point_source_plot: PointSourcePlot = PointSourcePlot() + point_source_plot: PointSourcePlot = field(default_factory=lambda: PointSourcePlot()) diff --git a/threeML/config/plugin_structure.py b/threeML/config/plugin_structure.py index ff2fe50a3..4d1073696 100644 --- a/threeML/config/plugin_structure.py +++ b/threeML/config/plugin_structure.py @@ -1,31 +1,37 @@ from dataclasses import dataclass, field -from enum import Enum, Flag -from typing import Any, Dict, List, Optional -import matplotlib.pyplot as plt -from omegaconf import II, MISSING, SI, OmegaConf - -from .plotting_structure import BinnedSpectrumPlot, DataHistPlot, MPLCmap +from .plotting_structure import ( + BinnedSpectrumPlot, + DataHistPlot, + FermiSpectrumPlot, + MPLCmap, +) @dataclass class OGIP: - fit_plot: BinnedSpectrumPlot = BinnedSpectrumPlot() - data_plot: DataHistPlot = DataHistPlot() + fit_plot: BinnedSpectrumPlot = field(default_factory=lambda: BinnedSpectrumPlot()) + data_plot: DataHistPlot = field(default_factory=lambda: DataHistPlot()) response_cmap: MPLCmap = MPLCmap.viridis response_zero_color: str = "k" +@dataclass +class Fermipy: + fit_plot: FermiSpectrumPlot = field(default_factory=lambda: FermiSpectrumPlot()) +# data_plot: DataHistPlot = DataHistPlot() + + @dataclass class Photo: - fit_plot: BinnedSpectrumPlot = BinnedSpectrumPlot() + fit_plot: BinnedSpectrumPlot = field(default_factory=lambda: BinnedSpectrumPlot()) @dataclass class Plugins: - ogip: OGIP = OGIP() - photo: Photo = Photo() - + ogip: OGIP = field(default_factory=lambda: OGIP()) + photo: Photo = field(default_factory=lambda: Photo()) + fermipy: Fermipy = field(default_factory=lambda: Fermipy()) @dataclass class TimeSeriesFit: @@ -40,4 +46,4 @@ class TimeSeries: selection_color: str = "#1fbfb8" background_color: str = "#C0392B" background_selection_color: str = "#E74C3C" - fit: TimeSeriesFit = TimeSeriesFit() + fit: TimeSeriesFit = field(default_factory=lambda: TimeSeriesFit()) diff --git a/threeML/io/calculate_flux.py b/threeML/io/calculate_flux.py index 630761ad2..23ec63783 100644 --- a/threeML/io/calculate_flux.py +++ b/threeML/io/calculate_flux.py @@ -89,7 +89,15 @@ def _setup_analysis_dictionaries( except: - comps = [] + try: + + comps = [ + c for c in source.components + ] + + except: + + comps = [] # duplicate components comps = [ @@ -129,7 +137,15 @@ def _setup_analysis_dictionaries( except: - comps = [] + try: + + comps = [ + c for c in source.components + ] + + except: + + comps = [] # duplicate components comps = [ @@ -157,11 +173,10 @@ def _setup_analysis_dictionaries( ): # if we want to use this source - if ( not use_components or ("total" in components_to_use) - or (not mle_analyses[key]["component_names"]) + or ('main' in mle_analyses[key]["component_names"]) ): mle_analyses[key][ "fitted point source" @@ -259,7 +274,7 @@ def _setup_analysis_dictionaries( if ( not use_components or ("total" in components_to_use) - or (not bayesian_analyses[key]["component_names"]) + or ('main' in bayesian_analyses[key]["component_names"]) ): bayesian_analyses[key][ "fitted point source" @@ -385,11 +400,17 @@ def _collect_sums_into_dictionaries( "total" in components_to_use ): use_total = True + + if 'main' in list(analyses[key]["components"].keys() + ): + use_total = True - for component in list(analyses[key]["components"].keys()): - 
component_sum_dict.setdefault(component, []).append( - analyses[key]["components"][component] - ) + else: + + for component in list(analyses[key]["components"].keys()): + component_sum_dict.setdefault(component, []).append( + analyses[key]["components"][component] + ) else: @@ -453,23 +474,29 @@ def _compute_output(analyses, _defaults, out): "total" in _defaults["components_to_use"] ): get_total = True + + if 'main' in list(analyses[key]["components"].keys() + ): + get_total = True - for component in list(analyses[key]["components"].keys()): - # extract the information and plot it + else: - samples = analyses[key]["components"][component] + for component in list(analyses[key]["components"].keys()): + # extract the information and plot it + + samples = analyses[key]["components"][component] - label = f"{key}: {component}" + label = f"{key}: {component}" - _append_best_fit_and_errors( - samples, - _defaults, - label, - fluxes, - p_errors, - n_errors, - labels, - ) + _append_best_fit_and_errors( + samples, + _defaults, + label, + fluxes, + p_errors, + n_errors, + labels, + ) else: @@ -478,14 +505,18 @@ def _compute_output(analyses, _defaults, out): if get_total: # it ends up that we need to plot the total spectrum # which is just a repeat of the process + + try: + samples = analyses[key]["fitted point source"] + except: + log.error("%s component(s) not available in source %s" %\ + (_defaults["components_to_use"], key)) + else: + label = f"{key}: total" - samples = analyses[key]["fitted point source"] - - label = f"{key}: total" - - _append_best_fit_and_errors( - samples, _defaults, label, fluxes, p_errors, n_errors, labels - ) + _append_best_fit_and_errors( + samples, _defaults, label, fluxes, p_errors, n_errors, labels + ) if fluxes: # now make a data frame @@ -586,7 +617,7 @@ def calculate_point_source_flux(*args, **kwargs): log.error( "The use of calculate_point_source_flux is deprecated. Please use the .get_point_source_flux()" " method of the JointLikelihood.results or the BayesianAnalysis.results member. For example:" - " jl.results.get_point_source_flux()." + " jl.results.get_flux()." ) return _calculate_point_source_flux(*args, **kwargs) diff --git a/threeML/io/fits_file.py b/threeML/io/fits_file.py index f8c2d559c..58bccf7df 100644 --- a/threeML/io/fits_file.py +++ b/threeML/io/fits_file.py @@ -199,7 +199,7 @@ def __init__(self, data_tuple, header_tuple): # see if it is a string array - if test_value.dtype.type == np.string_: + if test_value.dtype.type == np.bytes_: max_string_length = max(column_data, key=len).dtype.itemsize diff --git a/threeML/io/plotting/cmap_cycle.py b/threeML/io/plotting/cmap_cycle.py index e90d648fa..6956a95fc 100644 --- a/threeML/io/plotting/cmap_cycle.py +++ b/threeML/io/plotting/cmap_cycle.py @@ -3,6 +3,7 @@ __author__ = "grburgess" import matplotlib.pyplot as plt +from matplotlib import colormaps import numpy as np @@ -44,7 +45,7 @@ def cmap_intervals(length=50, cmap="YlOrBr", start=None, stop=None): successive lines because successive colors are very similar. 
:param cmap: str name of a matplotlib colormap (see matplotlib.pyplot.cm)
     """
-    cm = plt.cm.get_cmap(cmap)
+    cm = colormaps[cmap]

     # qualitative color maps
     if cmap in [
diff --git a/threeML/io/plotting/post_process_data_plots.py b/threeML/io/plotting/post_process_data_plots.py
index 2df413473..c32d3d89d 100644
--- a/threeML/io/plotting/post_process_data_plots.py
+++ b/threeML/io/plotting/post_process_data_plots.py
@@ -1,5 +1,5 @@
 import matplotlib.pyplot as plt
-import matplotlib.cm as cm
+from matplotlib import colormaps
 import numpy as np
 import threeML.plugins.PhotometryLike as photolike
 import threeML.plugins.SpectrumLike as speclike
@@ -7,9 +7,16 @@
 try:
     from threeML.plugins.FermiLATLike import FermiLATLike

-    LATLike = True
+    FermiLATLike_flag = True
 except:
-    LATLike = False
+    FermiLATLike_flag = False
+
+try:
+    from threeML.plugins.FermipyLike import FermipyLike
+
+    FermipyLike_flag = True
+except:
+    FermipyLike_flag = False

 from threeML.config.config import threeML_config
 from threeML.config.plotting_structure import BinnedSpectrumPlot
@@ -87,11 +94,13 @@ def display_spectrum_model_counts(analysis, data=(), **kwargs):
         if isinstance(analysis.data_list[key], speclike.SpectrumLike):

             new_data_keys.append(key)
-        elif LATLike and isinstance(analysis.data_list[key], FermiLATLike):
+        elif FermiLATLike_flag and isinstance(analysis.data_list[key], FermiLATLike):
+            new_data_keys.append(key)
+        elif FermipyLike_flag and isinstance(analysis.data_list[key], FermipyLike):
             new_data_keys.append(key)
         else:
             log.warning(
-                "Dataset %s is not of the SpectrumLike or FermiLATLike kind. Cannot be plotted by display_spectrum_model_counts"
+                "Dataset %s is not of the SpectrumLike, FermiLATLike or FermipyLike kind. Cannot be plotted by display_spectrum_model_counts"
                 % key
             )

@@ -583,7 +592,7 @@ def display_photometry_model_magnitudes(analysis, data=(), **kwargs):
         step = bool(kwargs.pop("step"))

     if "data_cmap" in kwargs:
-        data_cmap = cm.get_cmap(kwargs.pop("data_cmap"))
+        data_cmap = colormaps[kwargs.pop("data_cmap")]
         data_colors = cmap_intervals(len(data_keys), data_cmap)

     if "model_cmap" in kwargs:
diff --git a/threeML/minimizer/tutorial_material.py b/threeML/minimizer/tutorial_material.py
index 0920fc98d..32464f605 100644
--- a/threeML/minimizer/tutorial_material.py
+++ b/threeML/minimizer/tutorial_material.py
@@ -3,6 +3,7 @@
 from builtins import map, range, zip

 import matplotlib.pyplot as plt
+from matplotlib import colormaps
 import numpy as np
 from astromodels import (Function1D, FunctionMeta, Gaussian, Model,
                          PointSource, use_astromodels_memoization)
@@ -138,7 +139,7 @@ def plot_minimizer_path(jl, points=False):
     # Color map
     N = len(qx_sets)

-    cmap = plt.cm.get_cmap("gist_earth", N + 1)
+    cmap = colormaps["gist_earth"].resampled(N + 1)

     for i, (qx, qy) in enumerate(zip(qx_sets, qy_sets)):

diff --git a/threeML/plugin_prototype.py b/threeML/plugin_prototype.py
index f100706ee..539815b5d 100644
--- a/threeML/plugin_prototype.py
+++ b/threeML/plugin_prototype.py
@@ -67,6 +67,8 @@ def __init__(self, name: str, nuisance_parameters: Dict[str, Parameter]):

         self._tag = None

+        self._exclude_from_fit = False
+
     def get_name(self) -> str:
         log.warning(
             "Do not use get_name() for plugins, use the .name property",
@@ -185,6 +187,14 @@ def _set_tag(self, spec):
             "[end])",
         )

+    def exclude_from_fit(self, flag=False):
+        """
+        This can be used to exclude a plugin from the fit
+        :param flag: True or False (default)
+        :return:
+        """
+        log.info("Plugin %s excluded from the fit: %s" % (self.name, flag))
+        self._exclude_from_fit = flag
+
     ######################################################################
     # The following methods must be implemented by each plugin
     ######################################################################
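Note: a sketch of how the new exclude_from_fit flag is meant to be used. The base class only stores the flag; per the FermiLATLike hunk just below, a plugin honours it by zeroing its log-likelihood in get_log_like. The plugin and model objects here are hypothetical placeholders:

from threeML import DataList, JointLikelihood

# 'lat' and 'gbm' stand in for two already-configured plugin instances,
# 'model' for an astromodels Model
data = DataList(lat, gbm)

lat.exclude_from_fit(True)   # 'lat' stays in the DataList but now contributes
                             # log_like * 0 to the joint likelihood

jl = JointLikelihood(model, data)
jl.fit()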
diff --git a/threeML/plugins/FermiLATLike.py b/threeML/plugins/FermiLATLike.py
index 4631cf412..b23532560 100644
--- a/threeML/plugins/FermiLATLike.py
+++ b/threeML/plugins/FermiLATLike.py
@@ -132,8 +132,8 @@ def write_xml(self, xmlfile, ra, dec, roi) -> List[str]:

         iso = LikelihoodComponent.IsotropicTemplate(self.irfs)

-        iso.source.spectrum.Normalization.max = 1.5
-        iso.source.spectrum.Normalization.min = 0.5
+        iso.source.spectrum.Normalization.max = 5.0
+        iso.source.spectrum.Normalization.min = 0.1
         iso.source.spectrum.setAttributes()

         all_sources_for_pylike.append(iso)
@@ -324,9 +324,11 @@ def __init__(
         self.n_energies = 200

         with fits.open(event_file) as file:
-            self.__observation_duration = (
-                file[0].header["TSTOP"] - file[0].header["TSTART"]
-            )
+            # Sum the Good Time Intervals rather than taking TSTOP - TSTART,
+            # which would include gaps in the data taking
+            self.__observation_duration = (file['GTI'].data.STOP - file['GTI'].data.START).sum()
+            log.info('FermiLATLike - GTI SUM = %s' % self.__observation_duration)

         # This is the limit on the effective area correction factor,
         # which is a multiplicative factor in front of the whole model
@@ -482,6 +484,14 @@ def set_model(

         self.update_nuisance_parameters(new_nuisance_parameters)

+    def get_number_of_data_points(self):
+
+        number_of_data_points = self.like.total_nobs()
+
+        log.debug("Number of events in LAT likelihood fit: %d" % number_of_data_points)
+
+        return number_of_data_points
+
     def clear_source_name(self) -> None:

         if self._source_name is not None:
@@ -603,7 +613,7 @@ def get_log_like(self):
         self.like.syncSrcParams()

         log_like = self.like.logLike.value()
-
+        if self._exclude_from_fit:
+            log_like *= 0
         return log_like - logfactorial(int(self.like.total_nobs()))

     #
@@ -877,7 +887,7 @@ def display_model(

         e1 = self.like.energies[:-1] * 1000.0  # this has to be in keV
         e2 = self.like.energies[1:] * 1000.0  # this has to be in keV
-
+        log.debug("ENERGIES [MeV]: %s" % self.like.energies)
         ec = (e1 + e2) / 2.0
         de = (e2 - e1) / 2.0
@@ -886,14 +896,14 @@ def display_model(
             self.like._srcCnts(self.like.sourceNames()[0])
         )

-        sum_backgrounds = numpy.zeros_like(
-            self.like._srcCnts(self.like.sourceNames()[0])
-        )
+        sum_backgrounds = numpy.zeros_like(sum_model)

         for source_name in self.like.sourceNames():

             source_counts = self.like._srcCnts(source_name)

+            log.debug("%s source_counts= %s" % (source_name, source_counts))
+
             sum_model = sum_model + source_counts
             if source_name != self._source_name:
                 sum_backgrounds = sum_backgrounds + source_counts
@@ -904,6 +914,10 @@ def display_model(
                 label=source_name,  # , **_default_model_kwargs
             )
             # sub.plot(ec, self.like._srcCnts(source_name), label=source_name)
+
+        log.debug("sum_model= %s" % sum_model)
+        log.debug("sum_backgrounds= %s" % sum_backgrounds)
+
         residual_plot.add_model(
             ec,
             sum_model / conversion_factor,
@@ -916,9 +930,11 @@ def display_model(
         y = self.like.nobs
         y_err = numpy.sqrt(y)

+        log.debug("counts= %s" % y)
+
         significance_calc = Significance(Non=y, Noff=sum_backgrounds)

         if ratio_residuals:
             resid = old_div((self.like.nobs - sum_model), sum_model)
             resid_err = old_div(y_err,
                                 sum_model)
         else:
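Note: both the FermiLATLike change above and the FermipyLike.get_observation_duration method added further down replace the naive TSTOP - TSTART difference with a sum over the Good Time Intervals, which excludes gaps in the data taking. A standalone numpy sketch of the same computation, with a hypothetical file name (the clipped variant is effectively what FermipyLike does for a tmin/tmax sub-selection):

import numpy as np
from astropy.io import fits

with fits.open("events.fits") as f:       # hypothetical event file
    start = f["GTI"].data.START
    stop = f["GTI"].data.STOP

# live observation time: sum of the GTIs, not TSTOP - TSTART of the whole file
duration = (stop - start).sum()

# restrict to a [tmin, tmax] selection by clipping each GTI to the window
tmin, tmax = 239557417.0, 239587417.0     # hypothetical MET bounds
overlap = np.clip(stop, None, tmax) - np.clip(start, tmin, None)
duration_sel = overlap[overlap > 0].sum()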
diff --git a/threeML/plugins/FermipyLike.py b/threeML/plugins/FermipyLike.py
index dc509c4cf..b702ce5db 100644
--- a/threeML/plugins/FermipyLike.py
+++ b/threeML/plugins/FermipyLike.py
@@ -1,30 +1,37 @@
 from __future__ import division
-from past.utils import old_div
+
+import collections
+import os
+from typing import Any, Dict, List, Optional, Tuple, Union
+
 import astromodels
+import astropy.io.fits as fits
 import numpy as np
-import os
 import yaml
-import astropy.io.fits as fits
-from astropy.stats import circmean
-from astropy import units as u
-import collections
-
 from astromodels import Model, Parameter
 from astromodels.core import parameter_transformation
-
+from astropy import units as u
+from astropy.stats import circmean
+from past.utils import old_div
+from threeML.config.config import threeML_config
+from threeML.config.plotting_structure import FermiSpectrumPlot
 from threeML.exceptions.custom_exceptions import custom_warnings
+from threeML.io.dict_with_pretty_print import DictWithPrettyPrint
 from threeML.io.file_utils import sanitize_filename
+from threeML.io.logging import setup_logger
+from threeML.io.package_data import get_path_of_data_file
+from threeML.io.plotting.cmap_cycle import cmap_intervals
+from threeML.io.plotting.data_residual_plot import ResidualPlot
 from threeML.plugin_prototype import PluginPrototype
+from threeML.utils.power_of_two_utils import is_power_of_2
 from threeML.utils.statistics.gammaln import logfactorial
+from threeML.utils.statistics.stats_tools import Significance
 from threeML.utils.unique_deterministic_tag import get_unique_deterministic_tag
-from threeML.utils.power_of_two_utils import is_power_of_2
-from threeML.io.package_data import get_path_of_data_file
-from threeML.io.dict_with_pretty_print import DictWithPrettyPrint
-from threeML.io.logging import setup_logger

 log = setup_logger(__name__)

 from threeML.io.logging import setup_logger
+
 log = setup_logger(__name__)

 __instrument_name = "Fermi LAT (with fermipy)"

@@ -58,7 +65,16 @@ def _get_unique_tag_from_configuration(configuration):
         ("binning", ("roiwidth", "binsz", "binsperdec")),
         (
             "selection",
-            ("emin", "emax", "zmax", "evclass", "evtype", "filter", "ra", "dec"),
+            (
+                "emin",
+                "emax",
+                "zmax",
+                "evclass",
+                "evtype",
+                "filter",
+                "ra",
+                "dec",
+            ),
         ),
     )

@@ -75,7 +91,8 @@ def _get_unique_tag_from_configuration(configuration):
             if not key in configuration[section]:

                 log.critical(
-                    "Section %s in configuration lacks key %s, which is required" % key
+                    "Section %s in configuration lacks key %s, which is required"
+                    % (section, key)
                 )

             string_to_hash.append("%s" % configuration[section][key])

@@ -134,10 +151,13 @@ def _get_fermipy_instance(configuration, likelihood_model):
     # analysis a lot)

     # NOTE: these are going to be absolute paths
-    galactic_template = str( sanitize_filename(
-        findGalacticTemplate(irfs, ra_center, dec_center, roi_radius), True  # noqa: F821
-    ) )
-    isotropic_template = str( sanitize_filename(findIsotropicTemplate(irfs), True) )  # noqa: F821
+    galactic_template = str(
+        sanitize_filename(
+            findGalacticTemplate(irfs, ra_center, dec_center, roi_radius), True,  # noqa: F821
+        )
+    )
+    isotropic_template = str(
+        sanitize_filename(findIsotropicTemplate(irfs), True))  # noqa: F821

     # Add them to the fermipy model

@@ -152,7 +172,11 @@ def _get_fermipy_instance(configuration, likelihood_model):
         likelihood_model.point_sources.values()
     ):  # type: astromodels.PointSource

-        this_source = {"Index": 2.56233, "Scale": 572.78, "Prefactor": 2.4090e-12}
+        this_source = {
"Index": 2.56233, + "Scale": 572.78, + "Prefactor": 2.4090e-12, + } this_source["name"] = point_source.name this_source["ra"] = point_source.position.ra.value this_source["dec"] = point_source.position.dec.value @@ -168,14 +192,18 @@ def _get_fermipy_instance(configuration, likelihood_model): likelihood_model.extended_sources.values() ): # type: astromodels.ExtendedSource - this_source = {"Index": 2.56233, "Scale": 572.78, "Prefactor": 2.4090e-12} + this_source = { + "Index": 2.56233, + "Scale": 572.78, + "Prefactor": 2.4090e-12, + } this_source["name"] = extended_source.name # The spectrum used here is unconsequential, as it will be substituted by a FileFunction # later on. So I will just use PowerLaw for everything this_source["SpectrumType"] = "PowerLaw" - + theShape = extended_source.spatial_shape - + if theShape.name == "Disk_on_sphere": this_source["SpatialModel"] = "RadialDisk" this_source["ra"] = theShape.lon0.value @@ -186,28 +214,31 @@ def _get_fermipy_instance(configuration, likelihood_model): this_source["SpatialModel"] = "RadialGaussian" this_source["ra"] = theShape.lon0.value this_source["dec"] = theShape.lat0.value - #fermipy/fermi tools expect 68% containment radius = 1.36 sigma + # fermipy/fermi tools expect 68% containment radius = 1.36 sigma this_source["SpatialWidth"] = 1.36 * theShape.sigma.value elif theShape.name == "SpatialTemplate_2D": - + try: (ra_min, ra_max), (dec_min, dec_max) = theShape.get_boundaries() - this_source["ra"] = circmean( [ra_min, ra_max]*u.deg ).value - this_source["dec"] = circmean( [dec_min, dec_max]*u.deg ).value - + this_source["ra"] = circmean([ra_min, ra_max] * u.deg).value + this_source["dec"] = circmean([dec_min, dec_max] * u.deg).value + except: - log.critical( f"Source {extended_source.name} does not have a template file set; must call read_file first()" ) - + log.critical( + f"Source {extended_source.name} does not have a template file set; must call read_file first()" + ) + this_source["SpatialModel"] = "SpatialMap" this_source["Spatial_Filename"] = theShape._fitsfile else: - - log.critical(f"Extended source {extended_source.name}: shape {theShape.name} not yet implemented for FermipyLike") - sources.append(this_source) + log.critical( + f"Extended source {extended_source.name}: shape {theShape.name} not yet implemented for FermipyLike" + ) + sources.append(this_source) # Add all sources to the model fermipy_model["sources"] = sources @@ -230,12 +261,14 @@ def _get_fermipy_instance(configuration, likelihood_model): ): # type: astromodels.PointSource # This will substitute the current spectrum with a FileFunction with the same shape and flux - gta.set_source_spectrum(point_source.name, "FileFunction", update_source=False) + gta.set_source_spectrum( + point_source.name, "FileFunction", update_source=False + ) # Get the energies at which to evaluate this source this_log_energies, _flux = gta.get_source_dnde(point_source.name) this_energies_keV = ( - 10 ** this_log_energies * 1e3 + 10**this_log_energies * 1e3 ) # fermipy energies are in GeV, we need keV if energies_keV is None: @@ -247,10 +280,12 @@ def _get_fermipy_instance(configuration, likelihood_model): # This is to make sure that all sources are evaluated at the same energies if not np.all(energies_keV == this_energies_keV): - log.critical("All sources should be evaluated at the same energies.") + log.critical( + "All sources should be evaluated at the same energies." 
+ ) dnde = point_source(energies_keV) # ph / (cm2 s keV) - dnde_per_MeV = np.maximum(dnde * 1000.0, 1e-300) # ph / (cm2 s MeV) + dnde_per_MeV = np.maximum(dnde * 1000.0, 1e-300) # ph / (cm2 s MeV) gta.set_source_dnde(point_source.name, dnde_per_MeV, False) # Same for extended source @@ -259,12 +294,14 @@ def _get_fermipy_instance(configuration, likelihood_model): ): # type: astromodels.ExtendedSource # This will substitute the current spectrum with a FileFunction with the same shape and flux - gta.set_source_spectrum(extended_source.name, "FileFunction", update_source=False) + gta.set_source_spectrum( + extended_source.name, "FileFunction", update_source=False + ) # Get the energies at which to evaluate this source this_log_energies, _flux = gta.get_source_dnde(extended_source.name) this_energies_keV = ( - 10 ** this_log_energies * 1e3 + 10**this_log_energies * 1e3 ) # fermipy energies are in GeV, we need keV if energies_keV is None: @@ -276,10 +313,14 @@ def _get_fermipy_instance(configuration, likelihood_model): # This is to make sure that all sources are evaluated at the same energies if not np.all(energies_keV == this_energies_keV): - log.critical("All sources should be evaluated at the same energies.") + log.critical( + "All sources should be evaluated at the same energies." + ) - dnde = extended_source.get_spatially_integrated_flux(energies_keV) # ph / (cm2 s keV) - dnde_per_MeV = np.maximum(dnde * 1000.0, 1e-300) # ph / (cm2 s MeV) + dnde = extended_source.get_spatially_integrated_flux( + energies_keV + ) # ph / (cm2 s keV) + dnde_per_MeV = np.maximum(dnde * 1000.0, 1e-300) # ph / (cm2 s MeV) gta.set_source_dnde(extended_source.name, dnde_per_MeV, False) return gta, energies_keV @@ -288,7 +329,10 @@ def _get_fermipy_instance(configuration, likelihood_model): def _expensive_imports_hook(): from fermipy.gtanalysis import GTAnalysis - from GtBurst.LikelihoodComponent import findGalacticTemplate, findIsotropicTemplate + from GtBurst.LikelihoodComponent import ( + findGalacticTemplate, + findIsotropicTemplate, + ) globals()["GTAnalysis"] = GTAnalysis globals()["findGalacticTemplate"] = findGalacticTemplate @@ -321,7 +365,9 @@ def __init__(self, name, fermipy_config): nuisance_parameters = {} - super(FermipyLike, self).__init__(name, nuisance_parameters=nuisance_parameters) + super(FermipyLike, self).__init__( + name, nuisance_parameters=nuisance_parameters + ) # Check whether the provided configuration is a file @@ -370,20 +416,22 @@ def __init__(self, name, fermipy_config): # As minimum there must be a evfile and a scfile if not "evfile" in self._configuration["data"]: - log.critical( "You must provide a evfile in the data section" ) + log.critical("You must provide a evfile in the data section") if not "scfile" in self._configuration["data"]: - log.critical( "You must provide a scfile in the data section" ) + log.critical("You must provide a scfile in the data section") for datum in self._configuration["data"]: # Sanitize file name, as fermipy is not very good at handling relative paths or env. 
variables - filename = str( sanitize_filename(self._configuration["data"][datum], True) ) + filename = str( + sanitize_filename(self._configuration["data"][datum], True) + ) self._configuration["data"][datum] = filename - if not os.path.exists( self._configuration["data"][datum] ): - log.critical( "File %s (%s) not found" % (filename, datum) ) + if not os.path.exists(self._configuration["data"][datum]): + log.critical("File %s (%s) not found" % (filename, datum)) # Prepare the 'fileio' part # Save all output in a directory with a unique name which depends on the configuration, @@ -397,7 +445,10 @@ def __init__(self, name, fermipy_config): self._configuration["fileio"] = {"outdir": self._unique_id} # Ensure that there is a complete definition of a Region Of Interest (ROI) - if not (("ra" in self._configuration["selection"]) and ("dec" in self._configuration["selection"])): + if not ( + ("ra" in self._configuration["selection"]) + and ("dec" in self._configuration["selection"]) + ): log.critical( "You have to provide 'ra' and 'dec' in the 'selection' section of the configuration. Source name " "resolution, as well as Galactic coordinates, are not currently supported" @@ -405,10 +456,9 @@ def __init__(self, name, fermipy_config): # This is empty at the beginning, will be instanced in the set_model method self._gta = None - + # observation_duration = self._configuration["selection"]["tmax"] - self._configuration["selection"]["tmin"] self.set_inner_minimization(True) - @staticmethod def get_basic_config( evfile, @@ -421,8 +471,8 @@ def get_basic_config( evclass=128, evtype=3, filter="DATA_QUAL>0 && LAT_CONFIG==1", - fermipy_verbosity = 2, - fermitools_chatter = 2, + fermipy_verbosity=2, + fermitools_chatter=2, ): from fermipy.config import ConfigManager @@ -432,13 +482,13 @@ def get_basic_config( get_path_of_data_file("fermipy_basic_config.yml") ) # type: dict - evfile = str(sanitize_filename(evfile) ) - scfile = str(sanitize_filename(scfile) ) + evfile = str(sanitize_filename(evfile)) + scfile = str(sanitize_filename(scfile)) if not os.path.exists(evfile): - log.critical( "The provided evfile %s does not exist" % evfile ) + log.critical("The provided evfile %s does not exist" % evfile) if not os.path.exists(scfile): - log.critical( "The provided scfile %s does not exist" % scfile ) + log.critical("The provided scfile %s does not exist" % scfile) basic_config["data"]["evfile"] = evfile basic_config["data"]["scfile"] = scfile @@ -446,13 +496,15 @@ def get_basic_config( ra = float(ra) dec = float(dec) - if not (( 0 <= ra) and ( ra <= 360 )): + if not ((0 <= ra) and (ra <= 360)): log.critical( - "The provided R.A. (%s) is not valid. Should be 0 <= ra <= 360.0" % ra + "The provided R.A. (%s) is not valid. Should be 0 <= ra <= 360.0" + % ra ) if not ((-90 <= dec) and (dec <= 90)): - log.critical ( - "The provided Dec (%s) is not valid. Should be -90 <= dec <= 90.0" % dec + log.critical( + "The provided Dec (%s) is not valid. Should be -90 <= dec <= 90.0" + % dec ) basic_config["selection"]["ra"] = ra @@ -465,7 +517,7 @@ def get_basic_config( basic_config["selection"]["emax"] = emax zmax = float(zmax) - if not (( 0.0 <= zmax) and (zmax <= 180.0)): + if not ((0.0 <= zmax) and (zmax <= 180.0)): log.critical( "The provided Zenith angle cut (zmax = %s) is not valid. 
" "Should be 0 <= zmax <= 180.0" % zmax @@ -476,13 +528,13 @@ def get_basic_config( with fits.open(scfile) as ft2_: tmin = float(ft2_[0].header["TSTART"]) tmax = float(ft2_[0].header["TSTOP"]) - + basic_config["selection"]["tmin"] = tmin basic_config["selection"]["tmax"] = tmax evclass = int(evclass) if not is_power_of_2(evclass): - log.critical( "The provided evclass is not a power of 2." ) + log.critical("The provided evclass is not a power of 2.") basic_config["selection"]["evclass"] = evclass @@ -493,9 +545,10 @@ def get_basic_config( basic_config["selection"]["filter"] = filter basic_config["logging"]["verbosity"] = fermipy_verbosity - #(In fermipy convention, 0 = critical only, 1 also errors, 2 also warnings, 3 also info, 4 also debug) - basic_config["logging"]["chatter"] = fermitools_chatter #0 = no screen output. 2 = some output, 4 = lot of output. - + # (In fermipy convention, 0 = critical only, 1 also errors, 2 also warnings, 3 also info, 4 also debug) + basic_config["logging"][ + "chatter" + ] = fermitools_chatter # 0 = no screen output. 2 = some output, 4 = lot of output. return DictWithPrettyPrint(basic_config) @@ -519,6 +572,27 @@ def gta(self): return self._gta + def get_observation_duration(self): + event_file = self._configuration["data"]["evfile"] + tmin = self._configuration["selection"]["tmin"] + tmax = self._configuration["selection"]["tmax"] + with fits.open(event_file) as file: + gti_start = file["GTI"].data.START + gti_stop = file["GTI"].data.STOP + + myfilter = (gti_stop > tmin) * (gti_start < tmax) + + gti_sum = (gti_stop[myfilter] - gti_start[myfilter]).sum() + + observation_duration = ( + gti_sum + - (tmin - gti_start[myfilter][0]) + - (gti_stop[myfilter][-1] - tmax) + ) + + log.info(f"FermipyLike - GTI SUM...:{observation_duration}") + return observation_duration + def set_model(self, likelihood_model_instance): """ Set the model to be used in the joint minimization. Must be a LikelihoodModel instance. 
@@ -532,14 +606,15 @@ def set_model(self, likelihood_model_instance): self._gta, self._pts_energies = _get_fermipy_instance( self._configuration, likelihood_model_instance ) - self._update_model_in_fermipy( update_dictionary = True, force_update = True) - + self._update_model_in_fermipy(update_dictionary=True, force_update=True) + # Build the list of the nuisance parameters new_nuisance_parameters = self._set_nuisance_parameters() self.update_nuisance_parameters(new_nuisance_parameters) - - def _update_model_in_fermipy(self, update_dictionary = False, delta = 0.0, force_update = False): + def _update_model_in_fermipy( + self, update_dictionary=False, delta=0.0, force_update=False + ): # Substitute all spectra for point sources with FileSpectrum, so that we will be able to control # them from 3ML @@ -547,25 +622,34 @@ def _update_model_in_fermipy(self, update_dictionary = False, delta = 0.0, force self._likelihood_model.point_sources.values() ): # type: astromodels.PointSource - - #Update this source only if it has free parameters (to gain time) - if not ( point_source.has_free_parameters or force_update): + # Update this source only if it has free parameters (to gain time) + if not (point_source.has_free_parameters or force_update): continue - #Update source position if free - if force_update or ( point_source.position.ra.free or point_source.position.dec.free ): - + # Update source position if free + if force_update or ( + point_source.position.ra.free or point_source.position.dec.free + ): + model_pos = point_source.position.sky_coord - fermipy_pos = self._gta.roi.get_source_by_name(point_source.name).skydir - - if model_pos.separation( fermipy_pos ).to("degree").value > delta : - #modeled after how this is done in fermipy - #(cf https://fermipy.readthedocs.io/en/latest/_modules/fermipy/sourcefind.html#SourceFind.localize) + fermipy_pos = self._gta.roi.get_source_by_name( + point_source.name + ).skydir + + if model_pos.separation(fermipy_pos).to("degree").value > delta: + # modeled after how this is done in fermipy + # (cf https://fermipy.readthedocs.io/en/latest/_modules/fermipy/sourcefind.html#SourceFind.localize) temp_source = self._gta.delete_source(point_source.name) - temp_source.set_position( model_pos ) - self._gta.add_source(point_source.name, temp_source, free=False) + temp_source.set_position(model_pos) + self._gta.add_source( + point_source.name, temp_source, free=False + ) self._gta.free_source(point_source.name, False) - self._gta.set_source_spectrum(point_source.name, "FileFunction", update_source=update_dictionary) + self._gta.set_source_spectrum( + point_source.name, + "FileFunction", + update_source=update_dictionary, + ) # Now set the spectrum of this source to the right one dnde = point_source(self._pts_energies) # ph / (cm2 s keV) @@ -574,66 +658,112 @@ def _update_model_in_fermipy(self, update_dictionary = False, delta = 0.0, force # it does not change the result. # (HF: Not sure who wrote the above but I think sometimes we do want to update fermipy dictionaries.) 
- self._gta.set_source_dnde(point_source.name, dnde_MeV, update_source = update_dictionary) - + self._gta.set_source_dnde( + point_source.name, dnde_MeV, update_source=update_dictionary + ) + # Same for extended source for extended_source in list( self._likelihood_model.extended_sources.values() ): # type: astromodels.ExtendedSource - #Update this source only if it has free parameters (to gain time) - if not ( extended_source.has_free_parameters or force_update): + # Update this source only if it has free parameters (to gain time) + if not (extended_source.has_free_parameters or force_update): continue theShape = extended_source.spatial_shape if theShape.has_free_parameters or force_update: - - fermipySource = self._gta.roi.get_source_by_name(extended_source.name) - fermipyPars = [fermipySource["ra"], fermipySource["dec"], fermipySource["SpatialWidth"] ] + + fermipySource = self._gta.roi.get_source_by_name( + extended_source.name + ) + fermipyPars = [ + fermipySource["ra"], + fermipySource["dec"], + fermipySource["SpatialWidth"], + ] if theShape.name == "Disk_on_sphere": - - amPars = [theShape.lon0.value, theShape.lat0.value, theShape.radius.value] - if not np.allclose( fermipyPars, amPars, 1e-10): - - temp_source = self._gta.delete_source(extended_source.name) - temp_source.set_spatial_model("RadialDisk", {'ra': theShape.lon0.value, - 'dec': theShape.lat0.value, 'SpatialWidth': theShape.radius.value}) + + amPars = [ + theShape.lon0.value, + theShape.lat0.value, + theShape.radius.value, + ] + if not np.allclose(fermipyPars, amPars, 1e-10): + + temp_source = self._gta.delete_source( + extended_source.name + ) + temp_source.set_spatial_model( + "RadialDisk", + { + "ra": theShape.lon0.value, + "dec": theShape.lat0.value, + "SpatialWidth": theShape.radius.value, + }, + ) # from fermipy: FIXME: Issue with source map cache with source is initialized as fixed. - self._gta.add_source(extended_source.name, temp_source, free=True) + self._gta.add_source( + extended_source.name, temp_source, free=True + ) self._gta.free_source(extended_source.name, free=False) - self._gta.set_source_spectrum(extended_source.name, "FileFunction", update_source=update_dictionary) - + self._gta.set_source_spectrum( + extended_source.name, + "FileFunction", + update_source=update_dictionary, + ) elif theShape.name == "Gaussian_on_sphere": - amPars = [theShape.lon0.value, theShape.lat0.value, 1.36*theShape.sigma.value] - if not np.allclose( fermipyPars, amPars, 1e-10): - - temp_source = self._gta.delete_source(extended_source.name) - temp_source.set_spatial_model("RadialGaussian", {'ra': theShape.lon0.value, - 'dec': theShape.lat0.value, 'SpatialWidth': 1.36*theShape.sigma.value}) + amPars = [ + theShape.lon0.value, + theShape.lat0.value, + 1.36 * theShape.sigma.value, + ] + if not np.allclose(fermipyPars, amPars, 1e-10): + + temp_source = self._gta.delete_source( + extended_source.name + ) + temp_source.set_spatial_model( + "RadialGaussian", + { + "ra": theShape.lon0.value, + "dec": theShape.lat0.value, + "SpatialWidth": 1.36 * theShape.sigma.value, + }, + ) # from fermipy: FIXME: Issue with source map cache with source is initialized as fixed. 
- self._gta.add_source(extended_source.name, temp_source, free=True) + self._gta.add_source( + extended_source.name, temp_source, free=True + ) self._gta.free_source(extended_source.name, free=False) - self._gta.set_source_spectrum(extended_source.name, "FileFunction", update_source=update_dictionary) + self._gta.set_source_spectrum( + extended_source.name, + "FileFunction", + update_source=update_dictionary, + ) elif theShape.name == "SpatialTemplate_2D": - #for now, assume we're not updating the fits file + # for now, assume we're not updating the fits file pass else: - #eventually, implement other shapes here. + # eventually, implement other shapes here. pass # Now set the spectrum of this source to the right one - dnde = extended_source.get_spatially_integrated_flux(self._pts_energies) # ph / (cm2 s keV) + dnde = extended_source.get_spatially_integrated_flux( + self._pts_energies + ) # ph / (cm2 s keV) dnde_MeV = np.maximum(dnde * 1000.0, 1e-300) # ph / (cm2 s MeV) # NOTE: I use update_source=False because it makes things 100x faster and I verified that # it does not change the result. # (HF: Not sure who wrote the above but I think sometimes we do want to update fermipy dictionaries.) - self._gta.set_source_dnde(extended_source.name, dnde_MeV, update_source = update_dictionary) - #self._gta._update_roi() + self._gta.set_source_dnde( + extended_source.name, dnde_MeV, update_source=update_dictionary + ) def get_log_like(self): """ @@ -644,14 +774,15 @@ def get_log_like(self): # Update all sources on the fermipy side self._update_model_in_fermipy() - #update nuisance parameters + # update nuisance parameters if self._fit_nuisance_params: for parameter in self.nuisance_parameters: - self.set_nuisance_parameter_value(parameter, self.nuisance_parameters[parameter].value) - - #self.like.syncSrcParams() + self.set_nuisance_parameter_value( + parameter, self.nuisance_parameters[parameter].value + ) + # self.like.syncSrcParams() # Get value of the log likelihood try: @@ -672,7 +803,7 @@ def inner_fit(self): particular detector. If there are no nuisance parameters, simply return the logLike value. 
""" - + return self.get_log_like() def set_inner_minimization(self, flag: bool) -> None: @@ -692,32 +823,35 @@ def set_inner_minimization(self, flag: bool) -> None: self.nuisance_parameters[parameter].free = self._fit_nuisance_params - def get_number_of_data_points(self): """ Return the number of spatial/energy bins :return: number of bins """ - + num = len(self._gta.components) - + if self._gta.projtype == "WCS": - num = num * self._gta._enumbins * int(self._gta.npix[0]) * int(self._gta.npix[1]) + num = ( + num + * self._gta._enumbins + * int(self._gta.npix[0]) + * int(self._gta.npix[1]) + ) if self._gta.projtype == "HPX": - + num = num * np.sum(self.geom.npix) - + return num def _set_nuisance_parameters(self): # Get the list of the sources - sources = list(self.gta.roi.get_sources() ) + sources = list(self.gta.roi.get_sources()) sources = [s.name for s in sources if "diff" in s.name] - bg_param_names = [] nuisance_parameters = collections.OrderedDict() @@ -726,15 +860,15 @@ def _set_nuisance_parameters(self): if self._fit_nuisance_params: self.gta.free_norm(src_name) - + pars = self.gta.get_free_source_params(src_name) - + for par in pars: - + thisName = f"{self.name}_{src_name}_{par}" bg_param_names.append(thisName) - thePar = self.gta._get_param( src_name, par) + thePar = self.gta._get_param(src_name, par) value = thePar["value"] * thePar["scale"] @@ -743,13 +877,17 @@ def _set_nuisance_parameters(self): value, min_value=thePar["min"], max_value=thePar["max"], - delta=0.01*value, - transformation=parameter_transformation.get_transformation("log10") + delta=0.01 * value, + transformation=parameter_transformation.get_transformation( + "log10" + ), ) nuisance_parameters[thisName].free = self._fit_nuisance_params - - log.debug(f"Added nuisance parameter {nuisance_parameters[thisName]}") + + log.debug( + f"Added nuisance parameter {nuisance_parameters[thisName]}" + ) return nuisance_parameters @@ -758,11 +896,450 @@ def _split_nuisance_parameter(self, param_name): tokens = param_name.split("_") pname = tokens[-1] src_name = "_".join(tokens[1:-1]) - + return src_name, pname - + def set_nuisance_parameter_value(self, paramName, value): srcName, parName = self._split_nuisance_parameter(paramName) - self.gta.set_parameter(srcName, parName, value, scale = 1, update_source=False) + self.gta.set_parameter( + srcName, parName, value, scale=1, update_source=False + ) + + def display_model( + self, + data_color: str = "k", + model_cmap: str = threeML_config.plugins.fermipy.fit_plot.model_cmap.value, + model_color: str = threeML_config.plugins.fermipy.fit_plot.model_color, + total_model_color: str = threeML_config.plugins.fermipy.fit_plot.total_model_color, + background_color: str = "b", + show_data: bool = True, + primary_sources: Optional[Union[str, List[str]]] = None, + show_background_sources: bool = threeML_config.plugins.fermipy.fit_plot.show_background_sources, + shade_fixed_sources: bool = threeML_config.plugins.fermipy.fit_plot.shade_fixed_sources, + shade_secondary_source: bool = threeML_config.plugins.fermipy.fit_plot.shade_secondary_sources, + fixed_sources_color: str = threeML_config.plugins.fermipy.fit_plot.fixed_sources_color, + secondary_sources_color: str = threeML_config.plugins.fermipy.fit_plot.secondary_sources_color, + show_residuals: bool = True, + ratio_residuals: bool = False, + show_legend: bool = True, + model_label: Optional[str] = None, + model_kwargs: Optional[Dict[str, Any]] = None, + data_kwargs: Optional[Dict[str, Any]] = None, + background_kwargs: Optional[Dict[str, 
Any]] = None,
+        **kwargs,
+    ) -> ResidualPlot:
+        """
+        Plot the current model with or without the data and the residuals. Multiple models can be plotted by supplying
+        a previous axis to 'model_subplot'.
+
+        Example usage:
+
+        fig = data.display_model()
+
+        fig2 = data2.display_model(model_subplot=fig.axes)
+
+        :param data_color: the color of the data
+        :param model_color: the color of the model
+        :param show_data: (bool) show the data with the model
+        :param show_residuals: (bool) show the residuals
+        :param ratio_residuals: (bool) use model ratio instead of residuals
+        :param show_legend: (bool) show legend
+        :param model_label: (optional) the label to use for the model; default is the plugin name
+        :param model_kwargs: plotting kwargs affecting the plotting of the model
+        :param data_kwargs: plotting kwargs affecting the plotting of the data and residuals
+        :param background_kwargs: plotting kwargs affecting the plotting of the background
+        :return:
+        """
+        # The model color is set to red by default...
+        # It should be set to None, or all the free sources will share the same color
+        model_color = None
+        log.debug(f'model_color : {model_color}')
+
+        # set up the default plotting
+
+        _default_model_kwargs = dict(alpha=1)
+
+        _default_background_kwargs = dict(
+            color=background_color, alpha=1, ls="--"
+        )
+
+        _sub_menu = threeML_config.plotting.residual_plot
+
+        _default_data_kwargs = dict(
+            color=data_color,
+            alpha=1,
+            fmt=_sub_menu.marker,
+            markersize=_sub_menu.size,
+            ls="",
+            elinewidth=2,  # _sub_menu.linewidth,
+            capsize=0,
+        )
+
+        # overwrite if these are in the config
+
+        _kwargs_menu: FermiSpectrumPlot = (
+            threeML_config.plugins.fermipy.fit_plot
+        )
+
+        if _kwargs_menu.model_mpl_kwargs is not None:
+
+            for k, v in _kwargs_menu.model_mpl_kwargs.items():
+
+                _default_model_kwargs[k] = v
+
+        if _kwargs_menu.data_mpl_kwargs is not None:
+
+            for k, v in _kwargs_menu.data_mpl_kwargs.items():
+
+                _default_data_kwargs[k] = v
+
+        if _kwargs_menu.background_mpl_kwargs is not None:
+
+            for k, v in _kwargs_menu.background_mpl_kwargs.items():
+
+                _default_background_kwargs[k] = v
+
+        if model_kwargs is not None:
+
+            assert type(model_kwargs) == dict, "model_kwargs must be a dict"
+
+            for k, v in list(model_kwargs.items()):
+
+                _default_model_kwargs[k] = v
+
+        if data_kwargs is not None:
+
+            assert type(data_kwargs) == dict, "data_kwargs must be a dict"
+
+            for k, v in list(data_kwargs.items()):
+
+                _default_data_kwargs[k] = v
+
+        if background_kwargs is not None:
+
+            assert (
+                type(background_kwargs) == dict
+            ), "background_kwargs must be a dict"
+
+            for k, v in list(background_kwargs.items()):
+
+                _default_background_kwargs[k] = v
+
+        # since we define some defaults, let's not overwrite
+        # the user's
+
+        _duplicates = (("ls", "linestyle"), ("lw", "linewidth"))
+
+        for d in _duplicates:
+
+            if (d[0] in _default_model_kwargs) and (
+                d[1] in _default_model_kwargs
+            ):
+
+                _default_model_kwargs.pop(d[0])
+
+            if (d[0] in _default_data_kwargs) and (
+                d[1] in _default_data_kwargs
+            ):
+
+                _default_data_kwargs.pop(d[0])
+
+            if (d[0] in _default_background_kwargs) and (
+                d[1] in _default_background_kwargs
+            ):
+
+                _default_background_kwargs.pop(d[0])
+
+        if model_label is None:
+            model_label = "%s Model" % self._name
+
+        residual_plot = ResidualPlot(
show_residuals=show_residuals, + ratio_residuals=ratio_residuals, + **kwargs, + ) + + e1 = self._gta.energies[:-1] * 1000.0 # this has to be in keV + e2 = self._gta.energies[1:] * 1000.0 # this has to be in keV + + log.debug(f"ENERGIES [MeV]: {self._gta.energies}") + + ec = (e1 + e2) / 2.0 + de = (e2 - e1) / 2.0 + + observation_duration = self.get_observation_duration() + + conversion_factor = de * observation_duration + + # now lets figure out the free and fixed sources + + primary_source_names = [] + + if primary_sources is not None: + + primary_source_names = np.atleast_1d(primary_sources) + primary_sources = [] + + fixed_sources = [] + free_sources = [] + + for name in self._gta.like.sourceNames(): + + if name in primary_source_names: + + primary_sources.append(name) + + else: + + if name in self._likelihood_model.sources: + + this_source: astromodels.sources.Source = ( + self._likelihood_model.sources[name] + ) + + if this_source.has_free_parameters: + + free_sources.append(name) + + else: + + fixed_sources.append(name) + + elif name == "galdiff": + # Diffuse emission models should be always displayed with the other sources + # if self._nuisance_parameters["LAT_galdiff_Prefactor"].free: + + free_sources.append(name) + + #else: + + # fixed_sources.append(name) + + elif name == "isodiff": + # Diffuse emission models should be always displayed with the other sources + #if self._nuisance_parameters["LAT_isodiff_Normalization"].free: + + free_sources.append(name) + + #else: + + # fixed_sources.append(name) + + log.debug(f"fixed_sources: {fixed_sources} ") + log.debug(f"free_sources: {free_sources} ") + log.debug(f"primary_sources: {primary_sources} ") + + if not show_background_sources: + + if primary_sources is None: + + msg = "no primary_sources set! 
diff --git a/threeML/test/test_FermipyLike.py b/threeML/test/test_FermipyLike.py
index 4e65390c9..9f5378a6a 100644
--- a/threeML/test/test_FermipyLike.py
+++ b/threeML/test/test_FermipyLike.py
@@ -34,12 +34,12 @@ def test_FermipyLike_fromVO():
     assert np.isclose(dec, 22.013, atol=5e-3)
 
     # This gets a 3ML model (a Model instance) from the table above, where every source
-    # in the 3FGL becomes a Source instance. Note that by default all parameters of all
+    # in the 4FGL becomes a Source instance. Note that by default all parameters of all
     # sources are fixed
 
     model = lat_catalog.get_model()
 
-    assert model.get_number_of_point_sources() == 172
+    assert model.get_number_of_point_sources() == 196
 
     # Let's free all the normalizations within 3 deg from the center
     model.free_point_sources_within_radius(3.0, normalization_only=True)
diff --git a/threeML/test/test_bayesian.py b/threeML/test/test_bayesian.py
index 9b06cfde9..862714630 100644
--- a/threeML/test/test_bayesian.py
+++ b/threeML/test/test_bayesian.py
@@ -178,7 +178,7 @@ def test_dynesty_nested(bayes_fitter, completed_bn090217206_bayesian_analysis):
 
     bayes.set_sampler("dynesty_nested")
 
-    bayes.sampler.setup(n_live_points=100, n_effective=10)
+    bayes.sampler.setup(n_live_points=200, n_effective=10)
 
     bayes.sample()
diff --git a/threeML/test/test_fits_file.py b/threeML/test/test_fits_file.py
index f790b4254..1c6f3aca3 100644
--- a/threeML/test/test_fits_file.py
+++ b/threeML/test/test_fits_file.py
@@ -48,7 +48,7 @@ def test_fits_file():
 
         assert dummy_fits["TEST"].header["TFORM1"] == dtype_keys[i]
 
-        assert np.alltrue(dummy_fits["TEST"].data["TEST_VALUE"] == test_values)
+        assert np.all(dummy_fits["TEST"].data["TEST_VALUE"] == test_values)
 
         file_name = "test_fits%d.fits" % i
 
@@ -66,4 +66,4 @@ def test_fits_file():
 
         assert read_dummy_fits["TEST"].header["TFORM1"] == dtype_keys[i]
 
-        assert np.alltrue(read_dummy_fits["TEST"].data["TEST_VALUE"] == test_values)
+        assert np.all(read_dummy_fits["TEST"].data["TEST_VALUE"] == test_values)
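The np.alltrue replacements here (and in gbm_data.py below) are forced by NumPy 2.0, which removed the alias after deprecating it in 1.25; np.all is the drop-in substitute:

    import numpy as np

    values = np.array([1, 2, 3])
    # np.alltrue(values == values) raises AttributeError on NumPy >= 2.0
    assert np.all(values == values)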
diff --git a/threeML/test/test_response.py b/threeML/test/test_response.py
index 0340f81af..6de187c0d 100644
--- a/threeML/test/test_response.py
+++ b/threeML/test/test_response.py
@@ -11,6 +11,10 @@
 )
 from threeML.utils.time_interval import TimeInterval
 
+if np.lib.NumpyVersion(np.__version__) >= '2.0.0b1':
+    from numpy.exceptions import VisibleDeprecationWarning
+else:
+    from numpy import VisibleDeprecationWarning
 
 def get_matrix_elements():
@@ -287,7 +291,7 @@ def test_response_set_constructor():
 
     with warnings.catch_warnings():
 
-        warnings.simplefilter("error", np.VisibleDeprecationWarning)
+        warnings.simplefilter("error", VisibleDeprecationWarning)
 
         rsp_set = InstrumentResponseSet.from_rsp2_file(
             rsp2_file, exposure_getter, counts_getter
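The guarded import follows NumPy's migration path: the warning classes moved to numpy.exceptions in 2.0, and np.lib.NumpyVersion gives a PEP 440-aware comparison that handles pre-releases such as 2.0.0b1. The same pattern covers other relocated names, for example:

    import numpy as np

    # Standalone version of the guard used in the patch.
    if np.lib.NumpyVersion(np.__version__) >= "2.0.0b1":
        from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning
    else:
        from numpy import ComplexWarning, VisibleDeprecationWarning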
diff --git a/threeML/utils/OGIP/response.py b/threeML/utils/OGIP/response.py
index 6ab27490a..24c066412 100644
--- a/threeML/utils/OGIP/response.py
+++ b/threeML/utils/OGIP/response.py
@@ -6,7 +6,7 @@
 import astropy.io.fits as pyfits
 import astropy.units as u
-import matplotlib.cm as cm
+from matplotlib import colormaps
 import matplotlib.pyplot as plt
 import numba as nb
 import numpy as np
@@ -322,7 +322,7 @@ def plot_matrix(self) -> plt.Figure:
         vmin = self._matrix[self._matrix > 0].min()
 
         cmap = copy.deepcopy(
-            cm.get_cmap(threeML_config.plugins.ogip.response_cmap.value)
+            colormaps[threeML_config.plugins.ogip.response_cmap.value]
         )
 
         cmap.set_under(threeML_config.plugins.ogip.response_zero_color)
diff --git a/threeML/utils/data_builders/fermi/gbm_data.py b/threeML/utils/data_builders/fermi/gbm_data.py
index 9f6e22450..8960b1be5 100644
--- a/threeML/utils/data_builders/fermi/gbm_data.py
+++ b/threeML/utils/data_builders/fermi/gbm_data.py
@@ -48,7 +48,7 @@ def __init__(self, ttefile: str) -> None:
         # sorting in time
         sort_idx = self._events.argsort()
 
-        if not np.alltrue(self._events[sort_idx] == self._events):
+        if not np.all(self._events[sort_idx] == self._events):
 
             # now sort both time and energy
             log.warning(
diff --git a/threeML/utils/data_builders/fermi/lat_transient_builder.py b/threeML/utils/data_builders/fermi/lat_transient_builder.py
index 15b74aa76..4c9a116e7 100644
--- a/threeML/utils/data_builders/fermi/lat_transient_builder.py
+++ b/threeML/utils/data_builders/fermi/lat_transient_builder.py
@@ -629,9 +629,13 @@ def run(self, include_previous_intervals=False, recompute_intervals=False):
             os.path.join(gtapp_mp_dir, 'gtltcube_mp.py'),
             os.path.join(gtapp_mp_dir, 'gttsmap_mp.py'),
         ]
-        for _e in executables:
-            print ("Changing permission to %s" % _e)
-            os.chmod(_e, 0o755)
+
+        try:
+            for _e in executables:
+                log.info('Changing permission to %s' % _e)
+                os.chmod(_e, 0o755)
+        except PermissionError:
+            pass
 
         log.info('About to run the following command:\n%s' % cmd)
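matplotlib.cm.get_cmap was deprecated in Matplotlib 3.7 and removed in 3.9, which motivates both the colormaps-registry rewrites above and the temporary "matplotlib<3.9" pins in the workflows. Standalone, the new access pattern looks like this:

    import copy

    from matplotlib import colormaps

    # Registry lookup replaces cm.get_cmap(name); deep-copy before
    # mutating, since registered colormaps are shared objects.
    cmap = copy.deepcopy(colormaps["viridis"])
    cmap.set_under("white")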
diff --git a/threeML/utils/data_builders/time_series_builder.py b/threeML/utils/data_builders/time_series_builder.py
index 40ca16840..9863f2a53 100644
--- a/threeML/utils/data_builders/time_series_builder.py
+++ b/threeML/utils/data_builders/time_series_builder.py
@@ -1350,7 +1350,7 @@ def from_lat_lle(
         if trigger_time is not None:
             lat_lle_file.trigger_time = trigger_time
 
-        # Mark channels less than 50 MeV as bad
+        # Mark channels less than 30 MeV as bad
 
         channel_30MeV = np.searchsorted(
             lat_lle_file.energy_edges[0], 30000.0) - 1
@@ -1462,31 +1462,31 @@ def from_pol_spectrum(
 
         # self._default_unbinned = unbinned
 
-        # extract the polar varaibles
+        # extract the polarization variables
 
-        polar_data = PolData(
+        polarization_data = PolData(
             polevents, polrsp, specrsp, reference_time=trigger_time
         )
 
         # Create the the event list
 
         event_list = EventListWithDeadTimeFraction(
-            arrival_times=polar_data.time,
-            measurement=polar_data.pha,
-            n_channels=polar_data.n_channels,
-            start_time=polar_data.time.min(),
-            stop_time=polar_data.time.max(),
-            dead_time_fraction=polar_data.dead_time_fraction,
+            arrival_times=polarization_data.time,
+            measurement=polarization_data.pha,
+            n_channels=polarization_data.n_channels,
+            start_time=polarization_data.time.min(),
+            stop_time=polarization_data.time.max(),
+            dead_time_fraction=polarization_data.dead_time_fraction,
             verbose=verbose,
             first_channel=1,
-            mission=polar_data.mission,
-            instrument=polar_data.instrument,
+            mission=polarization_data.mission,
+            instrument=polarization_data.instrument,
         )
 
         return cls(
             name,
             event_list,
-            response=polar_data.rsp,
+            response=polarization_data.rsp,
             poly_order=poly_order,
             unbinned=unbinned,
             verbose=verbose,
@@ -1499,8 +1499,8 @@ def from_polarization(
         cls,
         name,
         polevents,
+        polrsp,
         specrsp=None,
-        polrsp=None,
         restore_background=None,
         trigger_time=0.0,
         poly_order=-1,
@@ -1517,23 +1517,26 @@ def from_polarization(
 
         # extract the polar varaibles
 
-        polar_data = PolData(
+        polarization_data = PolData(
             polevents, polrsp, specrsp, trigger_time)
+
+        # get the pa offset
+        cls._pa_offset = polarization_data.get_pa_offset()
 
         # Create the the event list
 
         event_list = EventListWithDeadTimeFraction(
-            arrival_times=polar_data.scattering_angle_time,
-            measurement=polar_data.scattering_angles,
-            n_channels=polar_data.n_scattering_bins,
-            start_time=polar_data.scattering_angle_time.min(),
-            stop_time=polar_data.scattering_angle_time.max(),
-            dead_time_fraction=polar_data.scattering_angle_dead_time_fraction,
+            arrival_times=polarization_data.scattering_angle_time,
+            measurement=polarization_data.scattering_angles,
+            n_channels=polarization_data.n_scattering_bins,
+            start_time=polarization_data.scattering_angle_time.min(),
+            stop_time=polarization_data.scattering_angle_time.max(),
+            dead_time_fraction=polarization_data.scattering_angle_dead_time_fraction,
             verbose=verbose,
             first_channel=1,
-            mission=polar_data.mission,
-            instrument=polar_data.instrument,
-            edges=polar_data.scattering_edges,
+            mission=polarization_data.mission,
+            instrument=polarization_data.instrument,
+            edges=polarization_data.scattering_edges,
         )
 
         return cls(
@@ -1561,7 +1564,7 @@ def to_polarizationlike(
 
         assert issubclass(
             self._container_type, BinnedModulationCurve
-        ), "You are attempting to create a POLARLike plugin from the wrong data type"
+        ), "You are attempting to create a PolarizationLike plugin from the wrong data type"
 
         if extract_measured_background:
@@ -1572,7 +1575,7 @@ def to_polarizationlike(
             this_background_spectrum = self._background_spectrum
 
         if isinstance(self._response, str):
-            self._response = PolResponse(self._response, pa_offset)
+            self._response = PolResponse(self._response, self._pa_offset)
 
         if not from_bins:
@@ -1613,7 +1616,7 @@ def to_polarizationlike(
 
             self._verbose = False
 
-            list_of_polarlikes = []
+            list_of_polarizationlikes = []
 
             # now we make one response to save time
@@ -1663,7 +1666,7 @@ def to_polarizationlike(
                         # tstop=self._tstop
                     )
 
-                    list_of_polarlikes.append(pl)
+                    list_of_polarizationlikes.append(pl)
 
                 except (NegativeBackground):
                     log.error(
@@ -1682,7 +1685,7 @@ def to_polarizationlike(
 
             self._verbose = old_verbose
 
-        return list_of_polarlikes
+        return list_of_polarizationlikes
 
     # get the bins from the time series
     # for event lists, these are from created bins
     # for binned spectra sets, these are the native bines
@@ -1719,7 +1722,7 @@ def to_polarizationlike(
 
             try:
 
-                pl = PolarLike(
+                pl = PolarizationLike(
                     name="%s%s%d" % (self._name, interval_name, i),
                     observation=self._observed_spectrum,
                     background=this_background_spectrum,
@@ -1729,7 +1732,7 @@ def to_polarizationlike(
                     # tstop=self._tstop
                 )
 
-                list_of_polarlikes.append(pl)
+                list_of_polarizationlikes.append(pl)
 
             except (NegativeBackground):
                 log.error(
@@ -1748,4 +1751,4 @@ def to_polarizationlike(
 
         self._verbose = old_verbose
 
-        return list_of_polarlikes
+        return list_of_polarizationlikes
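Note the signature change in from_polarization: polrsp is now required (it is read up front to compute the position-angle offset stored in _pa_offset), while specrsp becomes the optional argument. A hypothetical call under the new ordering; all file names are placeholders:

    ts = TimeSeriesBuilder.from_polarization(
        "polar_obs",
        polevents="events.h5",      # placeholder file
        polrsp="pol_response.h5",   # now mandatory; used for the PA offset
        specrsp=None,
        trigger_time=239557476.0,
    )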
diff --git a/threeML/utils/data_download/Fermi_LAT/download_LAT_data.py b/threeML/utils/data_download/Fermi_LAT/download_LAT_data.py
index fe416f643..b3dd81fc8 100644
--- a/threeML/utils/data_download/Fermi_LAT/download_LAT_data.py
+++ b/threeML/utils/data_download/Fermi_LAT/download_LAT_data.py
@@ -658,7 +658,7 @@ def make_LAT_dataset(self,
 
         pass
 
-    def extract_events(self,roi, zmax, irf, thetamax=180.0,strategy='time'):
+    def extract_events(self,roi, zmax, irf, thetamax=180.0,strategy='time',data_quality=True):
 
         from GtBurst import dataHandling
         global lastDisplay
diff --git a/threeML/utils/fitted_objects/fitted_point_sources.py b/threeML/utils/fitted_objects/fitted_point_sources.py
index 074587d47..a72bf428a 100644
--- a/threeML/utils/fitted_objects/fitted_point_sources.py
+++ b/threeML/utils/fitted_objects/fitted_point_sources.py
@@ -294,17 +294,32 @@ def __init__(
 
         except:
 
-            self._components = None
+            try:
+                self._components = self._point_source.components
+
+            except:
+
+                self._components = None
 
         if component is not None:
 
             if self._components is not None:
 
-                model = self._components[component]["function"].evaluate_at
-                parameters = self._components[component]["function"].parameters
-                test_model = self._components[component]["function"]
-                parameter_names = self._components[component]["parameter_names"]
-
+                if isinstance(self._components[component], dict):
+                    model = self._components[component]["function"].evaluate_at
+                    parameters = self._components[component]["function"].parameters
+                    test_model = self._components[component]["function"]
+                    parameter_names = self._components[component]["parameter_names"]
+                else:
+                    model = self._components[component].shape.evaluate_at
+                    parameters = self._components[component].shape.parameters
+                    test_model = self._components[component].shape
+                    parameter_names = [
+                        par.name
+                        for par in list(
+                            self._components[component].shape.parameters.values()
+                        )
+                    ]
 
             else:
 
                 raise NotCompositeModelError("This is not a composite model!")
diff --git a/threeML/utils/fitted_objects/fitted_source_handler.py b/threeML/utils/fitted_objects/fitted_source_handler.py
index abd403ef8..3eae141aa 100644
--- a/threeML/utils/fitted_objects/fitted_source_handler.py
+++ b/threeML/utils/fitted_objects/fitted_source_handler.py
@@ -187,7 +187,7 @@ def _evaluate(self):
         variates = []
 
         # scroll through the independent variables
-        n_iterations = np.product(self._out_shape)
+        n_iterations = np.prod(self._out_shape)
 
         with use_astromodels_memoization(False):
diff --git a/threeML/utils/progress_bar.py b/threeML/utils/progress_bar.py
index f2db0f676..9e2434dbe 100644
--- a/threeML/utils/progress_bar.py
+++ b/threeML/utils/progress_bar.py
@@ -1,4 +1,4 @@
-from matplotlib import cm
+from matplotlib import colormaps
 from matplotlib.colors import to_hex
 import numpy as np
 
@@ -21,8 +21,7 @@ class _Get_Color(object):
 
     def __init__(self, n_colors=5):
 
-        cmap = cm.get_cmap(
-            threeML_config.interface.multi_progress_cmap)
+        cmap = colormaps[threeML_config.interface.multi_progress_cmap]
 
         self._colors = [to_hex(c) for c in cmap(np.linspace(0,1,n_colors))]
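Like np.alltrue, np.product is gone in NumPy 2.0; np.prod is the long-standing canonical spelling used above to count the samples implied by the output shape:

    import numpy as np

    out_shape = (4, 25)
    n_iterations = np.prod(out_shape)  # np.product(out_shape) fails on NumPy >= 2.0
    assert n_iterations == 100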
diff --git a/threeML/utils/spectrum/spectrum_likelihood.py b/threeML/utils/spectrum/spectrum_likelihood.py
index bc93b82af..a696efe9f 100644
--- a/threeML/utils/spectrum/spectrum_likelihood.py
+++ b/threeML/utils/spectrum/spectrum_likelihood.py
@@ -406,7 +406,7 @@ def __init__(self, spectrum_plugin):
             raise RuntimeError()
 
 try:
-    from ixpe.likelihood import GaussianObservedGaussianBackgroundStatistic
+    from ixpepy.likelihood import GaussianObservedGaussianBackgroundStatistic
     log.info('IXPE plugin found. Enabling Gaussian source with Gaussian background.')
 except:
     GaussianObservedGaussianBackgroundStatistic = NotAvailableStatistic
diff --git a/threeML/utils/statistics/likelihood_functions.py b/threeML/utils/statistics/likelihood_functions.py
index 0ab2f00f6..9b6b408d2 100644
--- a/threeML/utils/statistics/likelihood_functions.py
+++ b/threeML/utils/statistics/likelihood_functions.py
@@ -239,16 +239,24 @@ def poisson_observed_gaussian_background(
 
         # Let's do the branch with background > 0 first
 
         if background_counts[idx] > 0:
-
-            log_likes[idx] = (
-                -((b[idx] - background_counts[idx]) ** 2) / (2 * s2)
-                + observed_counts[idx] * log(b[idx] + expected_model_counts[idx])
-                - b[idx]
-                - expected_model_counts[idx]
-                - logfactorial(observed_counts[idx])
-                - 0.5 * _log_pi_2
-                - log(background_error[idx])
-            )
+            if observed_counts[idx] > 0:
+                log_likes[idx] = (
+                    -((b[idx] - background_counts[idx]) ** 2) / (2 * s2)
+                    + observed_counts[idx] * log(b[idx] + expected_model_counts[idx])
+                    - b[idx]
+                    - expected_model_counts[idx]
+                    - logfactorial(observed_counts[idx])
+                    - 0.5 * _log_pi_2
+                    - log(background_error[idx])
+                )
+            else:
+                log_likes[idx] = (
+                    -((b[idx] - background_counts[idx]) ** 2) / (2 * s2)
+                    - b[idx]
+                    - expected_model_counts[idx]
+                    - 0.5 * _log_pi_2
+                    - log(background_error[idx])
+                )
 
         # Let's do the other branch
 
@@ -261,6 +269,13 @@ def poisson_observed_gaussian_background(
                 - expected_model_counts[idx]
                 - logfactorial(observed_counts[idx])
             )
+            # print('N=', observed_counts[idx],
+            #       'M=', expected_model_counts[idx],
+            #       'B=', background_counts[idx],
+            #       'BE=', background_error[idx],
+            #       'b=', b[idx],
+            #       'b+M=', b[idx] + expected_model_counts[idx],
+            #       'LL=', log_likes[idx])
 
     return log_likes, b
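The new observed_counts branch guards the N log(b + m) term, which is ill-defined (0 * log 0) when a zero-count bin also has a vanishing profiled-background-plus-model; for N = 0 that term and log N! both drop out of the bin likelihood. A sketch of the N = 0 limit, assuming (as the surrounding code suggests) that _log_pi_2 = log(2 pi):

    import numpy as np

    def log_like_zero_counts(b, m, B, sigma_B):
        # N = 0 limit of the Poisson-observed / Gaussian-background term:
        # Gaussian penalty on the profiled background b, plus exp(-(b + m)).
        return (
            -((b - B) ** 2) / (2 * sigma_B ** 2)
            - b
            - m
            - 0.5 * np.log(2 * np.pi)
            - np.log(sigma_B)
        )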
diff --git a/threeML/utils/time_series/polynomial.py b/threeML/utils/time_series/polynomial.py
index 7a004c125..ba137290f 100644
--- a/threeML/utils/time_series/polynomial.py
+++ b/threeML/utils/time_series/polynomial.py
@@ -3,14 +3,15 @@
 import numpy as np
 from astromodels import (Constant, Cubic, Gaussian, Line, Log_normal, Model,
-                         PointSource, Quadratic)
+                         PointSource, Quadratic, Quartic)
 
 from threeML.bayesian.bayesian_analysis import BayesianAnalysis
 from threeML.classicMLE.joint_likelihood import JointLikelihood
 from threeML.config.config import threeML_config
 from threeML.config.config_utils import get_value
 from threeML.data_list import DataList
-from threeML.exceptions.custom_exceptions import BadCovariance, FitFailed
+from threeML.exceptions.custom_exceptions import BadCovariance  # , FitFailed
+from threeML.minimizer.minimization import FitFailed
 from threeML.io.logging import setup_logger, silence_console_log
 from threeML.minimizer.grid_minimizer import AllFitFailed
 from threeML.minimizer.minimization import (CannotComputeCovariance,
@@ -23,7 +24,7 @@
 log = setup_logger(__name__)
 
 # we include the line twice to mimic a constant
-_grade_model_lookup = (Line, Line, Quadratic, Cubic, Quadratic)
+_grade_model_lookup = (Line, Line, Quadratic, Cubic, Quartic)
 
 
 class Polynomial(object):
@@ -181,14 +182,13 @@ def polyfit(x: Iterable[float], y: Iterable[float], grade: int,
     not a member to allow parallel computation
 
     :param x: the x coord of the data
-    :param y: teh y coord of the data
+    :param y: the y coord of the data
    :param grade: the polynomical order or grade
    :param expousure: the exposure of the interval
    :param bayes: to do a bayesian fit or not
    """
-
    # Check that we have enough counts to perform the fit, otherwise
    # return a "zero polynomial"
    log.debug(f"starting polyfit with grade {grade} ")
@@ -229,10 +229,11 @@ def polyfit(x: Iterable[float], y: Iterable[float], grade: int,
     log.debug(f"starting polyfit with avg norm {avg}")
 
     with silence_console_log():
-
         xy = XYLike("series", x=x, y=y, exposure=exposure, poisson_data=True, quiet=True)
-
+        #from matplotlib import pyplot as plt
+        #xy.plot()
+        #plt.plot(x, exposure)
 
         if not bayes:
 
             # make sure the model is positive
@@ -249,6 +250,7 @@ def polyfit(x: Iterable[float], y: Iterable[float], grade: int,
 
                     v.value = 0.0
 
+                    #v.bounds = (-1e-3, 1e-3)
             # we actually use a line here
             # because a constant is returns a
             # single number
@@ -263,31 +265,38 @@ def polyfit(x: Iterable[float], y: Iterable[float], grade: int,
             jl.set_minimizer("minuit")
 
             # if the fit falis, retry and then just accept
-
+            #print('polynomials grade:', grade)
+            #print('polynomials model:')
+            #model.display(complete=True)
             try:
+                #print("=================> FIRST FIT!!!!")
                 jl.fit(quiet=True)
 
             except(FitFailed, BadCovariance, AllFitFailed, CannotComputeCovariance):
+                #print("=================> FIRST FIT FAILED!!!!")
 
                 log.debug("1st fit failed")
 
                 try:
+                    # print("=================> SECOND FIT!!!!")
                     jl.fit(quiet=True)
 
                 except(FitFailed, BadCovariance, AllFitFailed, CannotComputeCovariance):
+                    # print("=================> SECOND FIT FAILED!!!!")
 
                     log.debug("all MLE fits failed")
 
                     pass
 
+            #plt.plot(x, model._dummy.spectrum.main(x), 'k:')
+            #plt.show()
             coeff = [v.value for _, v in model.free_parameters.items()]
-
             log.debug(f"got coeff: {coeff}")
 
             final_polynomial = Polynomial(coeff)
-
             try:
                 final_polynomial.set_covariace_matrix(
                     jl.results.covariance_matrix)
@@ -296,7 +305,6 @@ def polyfit(x: Iterable[float], y: Iterable[float], grade: int,
                 log.exception(f"Fit failed in channel")
                 raise FitFailed()
 
-
             min_log_likelihood = xy.get_log_like()
 
     else:
diff --git a/threeML/utils/time_series/time_series.py b/threeML/utils/time_series/time_series.py
index 3f4c7452e..bd822b173 100644
--- a/threeML/utils/time_series/time_series.py
+++ b/threeML/utils/time_series/time_series.py
@@ -832,7 +832,7 @@ def _fit_global_and_determine_optimum_grade(self,
     """
 
     min_grade = 0
-    max_grade = 4
+    max_grade = 3
 
     log_likelihoods = []
 
     log.debug("attempting to find best poly with binned data")
diff --git a/versioneer.py b/versioneer.py
index 2fc1a0762..be0dc74f8 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -341,9 +341,9 @@ def get_config_from_root(root):
     # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
     # the top of versioneer.py for instructions on writing your setup.cfg .
     setup_cfg = os.path.join(root, "setup.cfg")
-    parser = configparser.SafeConfigParser()
+    parser = configparser.ConfigParser()
     with open(setup_cfg, "r") as f:
-        parser.readfp(f)
+        parser.read_file(f)
     VCS = parser.get("versioneer", "VCS")  # mandatory
 
 def get(parser, name):
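The versioneer fix is a Python 3.12 compatibility change: SafeConfigParser and readfp had been deprecated aliases since Python 3.2 and were finally removed in 3.12. The modern equivalent:

    import configparser

    # ConfigParser/read_file replace SafeConfigParser/readfp (removed in 3.12).
    parser = configparser.ConfigParser()
    with open("setup.cfg", "r") as f:
        parser.read_file(f)

    vcs = parser.get("versioneer", "VCS")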