#19 reset HEAD
cmd/pkgdash/assets/ui/favicon.ico  (BIN, Normal file)
Binary file not shown. Size: 948 B
cmd/pkgdash/assets/ui/index.html  (Normal file, +16)
@@ -0,0 +1,16 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>UI</title>
<base href="/ui/">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" type="image/x-icon" href="favicon.ico">
<link rel="preconnect" href="https://fonts.gstatic.com">
<link href="https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500&display=swap" rel="stylesheet">
<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
<link rel="stylesheet" href="styles.css"></head>
<body class="mat-typography">
<app-root></app-root>
<script src="runtime.js" type="module"></script><script src="polyfills.js" type="module"></script><script src="vendor.js" type="module"></script><script src="main.js" type="module"></script></body>
</html>
cmd/pkgdash/assets/ui/main.js  (Normal file, +2386)
File diff suppressed because it is too large
cmd/pkgdash/assets/ui/main.js.map  (Normal file, +1)
File diff suppressed because one or more lines are too long
cmd/pkgdash/assets/ui/polyfills.js  (Normal file, +3398)
File diff suppressed because it is too large
cmd/pkgdash/assets/ui/polyfills.js.map  (Normal file, +1)
File diff suppressed because one or more lines are too long
cmd/pkgdash/assets/ui/runtime.js  (Normal file, +163)
@@ -0,0 +1,163 @@
/******/ (() => { // webpackBootstrap
/******/ "use strict";
/******/ var __webpack_modules__ = ({});
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/ // Check if module is in cache
/******/ var cachedModule = __webpack_module_cache__[moduleId];
/******/ if (cachedModule !== undefined) {
/******/ return cachedModule.exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = __webpack_module_cache__[moduleId] = {
/******/ // no module.id needed
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__);
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = __webpack_modules__;
/******/
/************************************************************************/
/******/ /* webpack/runtime/chunk loaded */
/******/ (() => {
/******/ var deferred = [];
/******/ __webpack_require__.O = (result, chunkIds, fn, priority) => {
/******/ if(chunkIds) {
/******/ priority = priority || 0;
/******/ for(var i = deferred.length; i > 0 && deferred[i - 1][2] > priority; i--) deferred[i] = deferred[i - 1];
/******/ deferred[i] = [chunkIds, fn, priority];
/******/ return;
/******/ }
/******/ var notFulfilled = Infinity;
/******/ for (var i = 0; i < deferred.length; i++) {
/******/ var [chunkIds, fn, priority] = deferred[i];
/******/ var fulfilled = true;
/******/ for (var j = 0; j < chunkIds.length; j++) {
/******/ if ((priority & 1 === 0 || notFulfilled >= priority) && Object.keys(__webpack_require__.O).every((key) => (__webpack_require__.O[key](chunkIds[j])))) {
/******/ chunkIds.splice(j--, 1);
/******/ } else {
/******/ fulfilled = false;
/******/ if(priority < notFulfilled) notFulfilled = priority;
/******/ }
/******/ }
/******/ if(fulfilled) {
/******/ deferred.splice(i--, 1)
/******/ var r = fn();
/******/ if (r !== undefined) result = r;
/******/ }
/******/ }
/******/ return result;
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/compat get default export */
/******/ (() => {
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = (module) => {
/******/ var getter = module && module.__esModule ?
/******/ () => (module['default']) :
/******/ () => (module);
/******/ __webpack_require__.d(getter, { a: getter });
/******/ return getter;
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/define property getters */
/******/ (() => {
/******/ // define getter functions for harmony exports
/******/ __webpack_require__.d = (exports, definition) => {
/******/ for(var key in definition) {
/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {
/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });
/******/ }
/******/ }
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/hasOwnProperty shorthand */
/******/ (() => {
/******/ __webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))
/******/ })();
/******/
/******/ /* webpack/runtime/make namespace object */
/******/ (() => {
/******/ // define __esModule on exports
/******/ __webpack_require__.r = (exports) => {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/jsonp chunk loading */
/******/ (() => {
/******/ // no baseURI
/******/
/******/ // object to store loaded and loading chunks
/******/ // undefined = chunk not loaded, null = chunk preloaded/prefetched
/******/ // [resolve, reject, Promise] = chunk loading, 0 = chunk loaded
/******/ var installedChunks = {
/******/ "runtime": 0
/******/ };
/******/
/******/ // no chunk on demand loading
/******/
/******/ // no prefetching
/******/
/******/ // no preloaded
/******/
/******/ // no HMR
/******/
/******/ // no HMR manifest
/******/
/******/ __webpack_require__.O.j = (chunkId) => (installedChunks[chunkId] === 0);
/******/
/******/ // install a JSONP callback for chunk loading
/******/ var webpackJsonpCallback = (parentChunkLoadingFunction, data) => {
/******/ var [chunkIds, moreModules, runtime] = data;
/******/ // add "moreModules" to the modules object,
/******/ // then flag all "chunkIds" as loaded and fire callback
/******/ var moduleId, chunkId, i = 0;
/******/ if(chunkIds.some((id) => (installedChunks[id] !== 0))) {
/******/ for(moduleId in moreModules) {
/******/ if(__webpack_require__.o(moreModules, moduleId)) {
/******/ __webpack_require__.m[moduleId] = moreModules[moduleId];
/******/ }
/******/ }
/******/ if(runtime) var result = runtime(__webpack_require__);
/******/ }
/******/ if(parentChunkLoadingFunction) parentChunkLoadingFunction(data);
/******/ for(;i < chunkIds.length; i++) {
/******/ chunkId = chunkIds[i];
/******/ if(__webpack_require__.o(installedChunks, chunkId) && installedChunks[chunkId]) {
/******/ installedChunks[chunkId][0]();
/******/ }
/******/ installedChunks[chunkId] = 0;
/******/ }
/******/ return __webpack_require__.O(result);
/******/ }
/******/
/******/ var chunkLoadingGlobal = self["webpackChunkui"] = self["webpackChunkui"] || [];
/******/ chunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null, 0));
/******/ chunkLoadingGlobal.push = webpackJsonpCallback.bind(null, chunkLoadingGlobal.push.bind(chunkLoadingGlobal));
/******/ })();
/******/
/************************************************************************/
/******/
/******/
/******/ })()
;
//# sourceMappingURL=runtime.js.map
cmd/pkgdash/assets/ui/runtime.js.map  (Normal file, +1)
File diff suppressed because one or more lines are too long
cmd/pkgdash/assets/ui/styles.css  (Normal file, +644)
File diff suppressed because one or more lines are too long
cmd/pkgdash/assets/ui/styles.css.map  (Normal file, +1)
File diff suppressed because one or more lines are too long
cmd/pkgdash/assets/ui/vendor.js  (Normal file, +121153)
File diff suppressed because one or more lines are too long
cmd/pkgdash/assets/ui/vendor.js.map  (Normal file, +1)
File diff suppressed because one or more lines are too long
cmd/pkgdash/main.go  (Normal file, +237)
@@ -0,0 +1,237 @@
package main

import (
	"context"
	"crypto/tls"
	"embed"
	"io/fs"
	"net/http"
	"time"

	jsoncodec "go.unistack.org/micro-codec-json/v3"
	jsonpbcodec "go.unistack.org/micro-codec-jsonpb/v3"
	yamlcodec "go.unistack.org/micro-codec-yaml/v3"
	envconfig "go.unistack.org/micro-config-env/v3"
	fileconfig "go.unistack.org/micro-config-file/v3"
	vaultconfig "go.unistack.org/micro-config-vault/v3"
	victoriameter "go.unistack.org/micro-meter-victoriametrics/v3"
	httpsrv "go.unistack.org/micro-server-http/v3"
	healthhandler "go.unistack.org/micro-server-http/v3/handler/health"
	meterhandler "go.unistack.org/micro-server-http/v3/handler/meter"
	spahandler "go.unistack.org/micro-server-http/v3/handler/spa"
	swaggerui "go.unistack.org/micro-server-http/v3/handler/swagger-ui"
	"go.unistack.org/micro/v3"
	"go.unistack.org/micro/v3/config"
	"go.unistack.org/micro/v3/logger"
	slog "go.unistack.org/micro/v3/logger/slog"
	"go.unistack.org/micro/v3/meter"
	"go.unistack.org/micro/v3/server"
	rutil "go.unistack.org/micro/v3/util/reflect"
	appconfig "go.unistack.org/pkgdash/internal/config"
	"go.unistack.org/pkgdash/internal/database"
	"go.unistack.org/pkgdash/internal/handler"
	"go.unistack.org/pkgdash/internal/storage"
	_ "go.unistack.org/pkgdash/internal/storage/sqlite"
	"go.unistack.org/pkgdash/internal/worker"
	pb "go.unistack.org/pkgdash/proto"
)

const appName = "pkgdash"

var (
	BuildDate  string = "now"    // filled when build
	AppVersion string = "latest" // filled when build
)

//go:generate rm -rf assets
//go:generate mkdir assets
//go:generate cp -vr ../../ui/dist/ui assets/
//go:embed assets/*
var assets embed.FS

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	log := slog.NewLogger(logger.WithLevel(logger.DebugLevel))
	if err := log.Init(); err != nil {
		log.Fatal(ctx, "failed to init logger")
	}

	cfg := appconfig.NewConfig(appName, AppVersion) // create new empty config
	vc := vaultconfig.NewConfig(
		config.AllowFail(true),             // that may be not exists
		config.Struct(cfg),                 // load from vault
		config.Codec(jsoncodec.NewCodec()), // vault config in json
		config.BeforeLoad(func(ctx context.Context, c config.Config) error {
			return c.Init(
				vaultconfig.HTTPClient(&http.Client{
					Transport: &http.Transport{
						TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
					},
				}),
				vaultconfig.Address(cfg.Vault.Addr),
				vaultconfig.Timeout(5*time.Second),
				vaultconfig.Token(cfg.Vault.Token),
				vaultconfig.Path(cfg.Vault.Path),
			)
		}),
	)

	if err := config.Load(ctx,
		[]config.Config{
			config.NewConfig( // load from defaults
				config.Struct(cfg), // pass config struct
			),
			fileconfig.NewConfig( // load from file
				config.AllowFail(true),             // that may be not exists
				config.Struct(cfg),                 // pass config struct
				config.Codec(yamlcodec.NewCodec()), // file config in yaml
				fileconfig.Path("./local.yaml"),    // nearby file
			),
			envconfig.NewConfig( // load from environment
				config.Struct(cfg), // pass config struct
			),
			vc,
		}, config.LoadOverride(true),
	); err != nil {
		log.Fatal(ctx, "failed to load config: %v", err)
	}

	if err := config.Validate(ctx, cfg); err != nil {
		log.Fatal(ctx, "failed to validate config: %v", err)
	}

	swaggerui.Config["url"] = "../service.swagger.yaml"

	meter.DefaultMeter = victoriameter.NewMeter(
		meter.Path(cfg.Meter.Path),
		meter.WriteFDMetrics(true),
		meter.WriteProcessMetrics(true),
		meter.Address(cfg.Meter.Addr),
	)

	svc := micro.NewService()

	if err := svc.Init(
		micro.Server(httpsrv.NewServer()),
		micro.Name(cfg.Server.Name),
		micro.Version(cfg.Server.Version),
	); err != nil {
		log.Fatal(ctx, "failed to init service: %v", err)
	}

	assetsUI, err := fs.Sub(assets, "assets/ui")
	if err != nil {
		log.Fatal(ctx, "failed to get assets: %v", err)
	}

	if err := svc.Server("http").Init(
		server.Address(cfg.Server.Addr),
		server.Name(cfg.Server.Name),
		server.Version(cfg.Server.Version),
		server.Codec("application/json", jsonpbcodec.NewCodec()),
		httpsrv.PathHandler(http.MethodGet, "/ui/*", spahandler.Handler("/ui/", assetsUI)),
		httpsrv.PathHandler(http.MethodHead, "/ui/*", spahandler.Handler("/ui/", assetsUI)),
		httpsrv.PathHandler(http.MethodGet, "/swagger-ui/*", swaggerui.Handler("/swagger-ui")),
	); err != nil {
		log.Fatal(ctx, "failed to init service: %v", err)
	}

	if err := database.ParseDSN(cfg.Database); err != nil {
		log.Fatal(ctx, "failed to init database: %v", err)
	}

	db, err := database.Connect(ctx, cfg.Database, log)
	if err != nil {
		log.Fatal(ctx, "failed to connect database: %v", err)
	}

	store, err := storage.NewStorage(cfg.Database.Type, log, db)
	if err != nil {
		log.Fatal(ctx, "failed to init storage: %v", err)
	}

	h, err := handler.NewHandler(log, store)
	if err != nil {
		log.Fatal(ctx, "failed to create handler: %v", err)
	}

	if err := svc.Init(
		micro.Logger(
			log.Clone(logger.WithLevel(logger.ParseLevel(cfg.Server.LoggerLevel))),
		),
	); err != nil {
		log.Fatal(ctx, "failed to init service", err)
	}

	if err := pb.RegisterPkgdashServer(svc.Server("http"), h); err != nil {
		log.Fatal(ctx, "failed to register handler", err)
	}

	intsvc := httpsrv.NewServer(
		server.Codec("application/json", jsoncodec.NewCodec()),
		server.Address(cfg.Meter.Addr),
	)

	if err := intsvc.Init(); err != nil {
		log.Fatal(ctx, "failed to init http srv: %v", err)
	}

	if err := healthhandler.RegisterHealthServiceServer(intsvc, healthhandler.NewHandler()); err != nil {
		log.Fatal(ctx, "failed to set http handler: %v", err)
	}

	if err := meterhandler.RegisterMeterServiceServer(intsvc, meterhandler.NewHandler()); err != nil {
		log.Fatal(ctx, "failed to set http handler: %v", err)
	}

	if err := intsvc.Start(); err != nil {
		log.Fatal(ctx, "failed to run http srv: %v", err)
	}

	cw, err := vc.Watch(ctx, config.WatchCoalesce(true), config.WatchInterval(1*time.Second, 5*time.Second))
	if err != nil {
		log.Fatal(ctx, "failed to watch config: %v", err)
	}

	defer func() {
		if err := cw.Stop(); err != nil {
			log.Error(ctx, err.Error())
		}
	}()

	go func() {
		for {
			changes, err := cw.Next()
			if err != nil {
				log.Error(ctx, "failed to get config update: %v", err)
			}
			for k, v := range changes {
				if err = rutil.SetFieldByPath(cfg, v, k); err != nil {
					log.Error(ctx, "failed to set config update: %v", err)
					break
				}
			}
			if err == nil {
				for k := range changes {
					switch k {
					case "Server.LoggerLevel":
						if lvl, ok := changes[k].(string); ok {
							log.Info(ctx, "logger level changed to %s", lvl)
							log.Level(logger.ParseLevel(lvl))
						}
					}
				}
			}
		}
	}()

	go func() {
		worker.Run(ctx, log, store, time.Duration(cfg.App.CheckInterval))
	}()

	if err = svc.Run(); err != nil {
		log.Fatal(ctx, "failed to run svc: %v", err)
	}
}
cmd/pkgdashcli/main.go  (Normal file, +756)
@@ -0,0 +1,756 @@
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	stdslog "log/slog"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"os/user"
	"path/filepath"
	"strings"
	"text/template"
	"time"

	"github.com/go-git/go-git/v5"
	gitconfig "github.com/go-git/go-git/v5/config"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	httpauth "github.com/go-git/go-git/v5/plumbing/transport/http"
	"github.com/jdx/go-netrc"
	yamlcodec "go.unistack.org/micro-codec-yaml/v3"
	envconfig "go.unistack.org/micro-config-env/v3"
	fileconfig "go.unistack.org/micro-config-file/v3"
	microflag "go.unistack.org/micro-config-flag/v3"
	"go.unistack.org/micro/v3/config"
	"go.unistack.org/micro/v3/logger"
	"go.unistack.org/micro/v3/logger/slog"
	"go.unistack.org/pkgdash/internal/configcli"
	"go.unistack.org/pkgdash/internal/modules"
	"go.unistack.org/pkgdash/internal/source"
	"golang.org/x/mod/modfile"
	"golang.org/x/mod/semver"
)

// https://docs.github.com/ru/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

var initMsg = `
Pkgdashcli allows you to define a version update for a dependency and start
merge requests in version control systems.

Usage:
pkgdashcli --command {{command}} --path {{name of dep}}

Commands:
checkupdate | CheckUpdate collects a list of dependencies with the latest updates.
list | Returns a list of PR for this repository with update dependencies.
update --path {{name of one dep or empty for update all dep}} | Creates a PR with the specified dependency update in path or creates a PR with dependency updates for all modules if path is empty.
close --path {{name of one dep or empty for close all pr}} | Closes the PR for the specified dependency or closes all PRs with dependency updates if path is empty.

Flags:
--command | The command to execute
--path | The name of the module to create/close the PR, if empty, the command is executed for all modules.
`

var (
	DefaultPullRequestTitle = `Bump {{.Name}} from {{.VersionOld}} to {{.VersionNew}}`
	DefaultPullRequestBody  = `Bumps {{.Name}} from {{.VersionOld}} to {{.VersionNew}}`
)

var (
	configFiles = []string{
		"dependabot.yml",
		"pkgdashcli.yml",
		"pkgdashcli.yaml",
	}
	configDirs = []string{
		".gitea",
		".github",
		".gitlab",
	}
	repoMgmt = map[string]string{
		".gitea":  "gitea",
		".gogs":   "gogs",
		".github": "github",
		".gitlab": "gitlab",
	}
	repoAPI = map[string]string{
		".gitea":  "git.unistack.org",
		".gogs":   "gogs",
		".github": "github.com/unistack-org",
		".gitlab": "gitlab.mtsbank.ru",
	}
)

type Data struct {
	Modules map[string]modules.Update
}

func main() {
	var err error

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	log := slog.NewLogger(slog.WithHandlerFunc(stdslog.NewTextHandler))

	if err = log.Init(logger.WithLevel(logger.DebugLevel)); err != nil {
		log.Error(ctx, fmt.Sprintf("logger init error: %v", err))
	}

	cfg := configcli.NewConfig()

	if err = config.Load(ctx,
		[]config.Config{
			config.NewConfig(
				config.Struct(cfg),
			),
			envconfig.NewConfig(
				config.Struct(cfg),
			),
		},
		config.LoadOverride(true),
	); err != nil {
		log.Fatal(ctx, fmt.Sprintf("failed to load config: %v", err))
	}

	for _, configDir := range configDirs {
		for _, configFile := range configFiles {
			path := filepath.Join(configDir, configFile)
			if _, err = os.Stat(path); os.IsNotExist(err) {
				continue
			}

			c := fileconfig.NewConfig(
				config.AllowFail(false),
				config.Struct(cfg),
				config.Codec(yamlcodec.NewCodec()),
				fileconfig.Path(path),
			)
			err = c.Init()
			if err != nil {
				log.Error(ctx, fmt.Sprintf("failed to init config: %v", err))
			}
			if err = c.Load(ctx, config.LoadOverride(true)); err != nil {
				log.Error(ctx, fmt.Sprintf("failed to load config: %v", err))
			}
		}
	}

	if cfg.PullRequestBody == "" {
		cfg.PullRequestBody = DefaultPullRequestBody
	}

	if cfg.PullRequestTitle == "" {
		cfg.PullRequestTitle = DefaultPullRequestTitle
	}

	cliCfg := &configcli.Cli{}
	c := microflag.NewConfig(config.Struct(cliCfg), microflag.FlagErrorHandling(flag.ContinueOnError))

	if err = c.Init(); err != nil {
		log.Fatal(ctx, fmt.Sprintf("init cli cfg failed: %v", err))
	}

	if err = c.Load(ctx); err != nil {
		log.Fatal(ctx, fmt.Sprintf("load cli cfg failed: %v", err))
	}

	if cliCfg.Path == "" && cliCfg.Command == "" {
		fmt.Print(initMsg)
		return
	}

	path := "."
	if len(os.Args) > 1 {
		path = os.Args[1]
	}

	name, err := modules.FindModFile(path)
	if err != nil {
		panic(err)
	}
	buf, err := os.ReadFile(name)
	if err != nil {
		log.Fatal(ctx, "failed to read file", err)
	}
	mfile, err := modfile.Parse(name, buf, nil)
	if err != nil {
		log.Fatal(ctx, "failed to parse file", err)
	}

	mvs := make(map[string]modules.Update)

	updateOptions := modules.UpdateOptions{
		Pre:     cfg.UpdateOpt.Pre,
		Major:   cfg.UpdateOpt.Major,
		UpMajor: cfg.UpdateOpt.UpMajor,
		Cached:  cfg.UpdateOpt.Cached,
		OnUpdate: func(u modules.Update) {
			var modpath string // new mod path with major
			if u.Err != nil {
				log.Error(ctx, fmt.Sprintf("%s: failed: %v", u.Module.Path, u.Err))
				return
			}
			modpath = u.Module.Path
			v := semver.Major(u.Version)
			p := modules.ModPrefix(modpath)
			if !strings.HasPrefix(u.Module.Version, v) && v != "v1" && v != "v0" {
				switch strings.HasPrefix(u.Module.Path, "gopkg.in") {
				case true:
					modpath = p + "." + v
				case false:
					modpath = p + "/" + v
				}
			}
			mvs[modpath] = u
		},
	}

	for _, req := range mfile.Require {
		updateOptions.Modules = append(updateOptions.Modules, req.Mod)
	}

	modules.Updates(updateOptions)

	if err = getRepoMgmt(ctx, log, cfg); err != nil { // Filling in empty config fields.
		log.Error(ctx, err.Error())
	}

	if len(cfg.Branches) == 0 {
		branchName, err := getCurrentBranch(ctx)
		if err != nil {
			log.Fatal(ctx, "failed to get current branch", err)
		}
		cfg.Branches = append(cfg.Branches, branchName)
	}

	if cfg.Source.Owner == "" {
		owner, err := getOwnerRepository(ctx)
		if err != nil {
			log.Fatal(ctx, "failed to get current repository", err)
		}
		cfg.Source.Owner = owner
	}

	if cfg.Source.Repository == "" {
		repository, err := getCurrentRepository(ctx)
		if err != nil {
			log.Fatal(ctx, "failed to get current repository", err)
		}
		cfg.Source.Repository = repository
	}

	gitSource := source.NewSourceControl(*cfg, log)

	Execute(ctx, log, gitSource, mvs, *cliCfg, *cfg)

	log.Info(ctx, "Pkgdash successfully updated dependencies")
}

func Execute(ctx context.Context, log logger.Logger, gitSource source.SourceControl, mvs map[string]modules.Update, cliCfg configcli.Cli, cfg configcli.Config) {
	var mod modules.Update
	var ok bool
	var path string
	prList := make(map[string]map[string]string)

	switch cliCfg.Command {
	case "checkupdate":
		js, err := json.Marshal(mvs)
		fmt.Println(fmt.Sprintf(`Modules get update: %s, %s`, js, err))
	case "open":
		if cliCfg.Path != "" { // update one dep
			path = cliCfg.Path
			if mod, ok = mvs[path]; !ok {
				log.Fatal(ctx, fmt.Sprintf("For %s update not exist", path))
			}
			log.Debug(ctx, fmt.Sprintf("Start update %s from %s to %s", path, mod.Module.Version, mod.Version))
			for _, branch := range cfg.Branches {
				if err := gitSource.RequestOpen(ctx, branch, path, mod); err != nil {
					log.Fatal(ctx, fmt.Sprintf("failed to create pr: %v", err))
				}
			}
			log.Debug(ctx, fmt.Sprintf("Update successful for %s", path))
			return
		}
		for _, branch := range cfg.Branches { // update all dep
			for path, mod = range mvs {
				log.Debug(ctx, fmt.Sprintf("Start update %s from %s to %s", path, mod.Module.Version, mod.Version))
				err := gitSource.RequestOpen(ctx, branch, path, mod)
				if err != nil {
					if strings.Contains(err.Error(), "already exists") {
						log.Debug(ctx, fmt.Sprintf("skip %s, branch already exists", path))
						continue
					}
					log.Fatal(ctx, fmt.Sprintf("failed to create pr: %v", err))
				}
				log.Debug(ctx, fmt.Sprintf("Update successful for %s", path))
			}
		}
	case "update":
		if cliCfg.Path != "" { // update one dep
			path = cliCfg.Path
			if mod, ok = mvs[path]; !ok {
				log.Fatal(ctx, fmt.Sprintf("For %s update not exist", path))
			}
			log.Debug(ctx, fmt.Sprintf("Start update %s from %s to %s", path, mod.Module.Version, mod.Version))
			for _, branch := range cfg.Branches {
				if err := gitSource.RequestUpdate(ctx, branch, path, mod); err != nil {
					log.Fatal(ctx, fmt.Sprintf("failed to create pr: %v", err))
				}
			}
			log.Debug(ctx, fmt.Sprintf("Update successful for %s", path))
			return
		}
		for _, branch := range cfg.Branches { // update all dep
			for path, mod = range mvs {
				log.Debug(ctx, fmt.Sprintf("Start update %s from %s to %s", path, mod.Module.Version, mod.Version))
				err := gitSource.RequestUpdate(ctx, branch, path, mod)
				if err != nil {
					if strings.Contains(err.Error(), "already exists") {
						log.Debug(ctx, fmt.Sprintf("skip %s, branch already exists", path))
						continue
					}
					log.Fatal(ctx, fmt.Sprintf("failed to create pr: %v", err))
				}
				log.Debug(ctx, fmt.Sprintf("Update successful for %s", path))
			}
		}
	case "close":
		if cliCfg.Path != "" { // close one dep
			path = cliCfg.Path
			log.Debug(ctx, fmt.Sprintf("Start close for %s", path))
			for _, branch := range cfg.Branches {
				if err := gitSource.RequestClose(ctx, branch, path); err != nil {
					log.Fatal(ctx, fmt.Sprintf("failed to close pr: %v", err))
				}
			}
			log.Debug(ctx, fmt.Sprintf("Close successful for %s", path))
			return
		}
		for _, branch := range cfg.Branches {
			log.Info(ctx, fmt.Sprintf("Start getting pr for %s", branch))
			rMap, err := gitSource.RequestList(ctx, branch)
			if err != nil {
				log.Fatal(ctx, fmt.Sprintf("Error with getting pr list for branch: %s", branch))
			}

			log.Info(ctx, fmt.Sprintf("for %s:\n%s", branch, rMap))
			log.Info(ctx, fmt.Sprintf("Start close pr for base branch %s", branch))

			for path = range rMap {
				log.Debug(ctx, fmt.Sprintf("Start close for %s", path))
				if err = gitSource.RequestClose(ctx, branch, path); err != nil {
					log.Fatal(ctx, fmt.Sprintf("failed to close pr: %v", err))
				}
				log.Debug(ctx, fmt.Sprintf("Close successful for %s", path))
			}
		}
	case "list":
		for _, branch := range cfg.Branches {
			rMap, err := gitSource.RequestList(ctx, branch)
			if err != nil {
				log.Fatal(ctx, fmt.Sprintf("RequestList: error %s", err))
			}

			prList[branch] = rMap
		}
		js, err := json.Marshal(prList)
		if err != nil {
			log.Error(ctx, fmt.Sprintf("error: %s", err))
		}
		fmt.Printf("for %s:\n%s\n", cfg.Source.Repository, js)
	default:
		fmt.Print(initMsg)
	}
}

func getCurrentRepository(ctx context.Context) (string, error) {
	wd, err := os.Getwd()
	if err != nil {
		return "", err
	}

	p := filepath.Clean(wd)

	repo, err := git.PlainOpen(p)
	if err != nil {
		return "", err
	}

	cfg, err := repo.Config()
	if err != nil {
		return "", err
	}

	for k, v := range cfg.Remotes {
		if k != "origin" {
			continue
		}

		u, err := url.Parse(v.URLs[0])
		if err != nil {
			continue
		}
		return filepath.Base(u.Path), nil
	}

	return "", fmt.Errorf("failed to get remotes")
}

func getOwnerRepository(ctx context.Context) (string, error) {
	wd, err := os.Getwd()
	if err != nil {
		return "", err
	}

	p := filepath.Clean(wd)

	repo, err := git.PlainOpen(p)
	if err != nil {
		return "", err
	}

	cfg, err := repo.Config()
	if err != nil {
		return "", err
	}

	for k, v := range cfg.Remotes {
		if k != "origin" {
			continue
		}

		u, err := url.Parse(v.URLs[0])
		if err != nil {
			continue
		}
		return filepath.Base(filepath.Dir(u.Path)), nil
	}

	return "", fmt.Errorf("failed to get remotes")
}

func getCurrentBranch(ctx context.Context) (string, error) {
	wd, err := os.Getwd()
	if err != nil {
		return "", err
	}

	p := filepath.Clean(wd)

	repo, err := git.PlainOpen(p)
	if err != nil {
		return "", err
	}

	ref, err := repo.Head()
	if err != nil {
		return "", err
	}

	return ref.Name().Short(), nil
}

func getRepoMgmt(ctx context.Context, log logger.Logger, cfg *configcli.Config) error {
	wd, err := os.Getwd()
	if err != nil {
		return err
	}

	p := filepath.Clean(wd)
	for _, configDir := range configDirs {
		_, err := os.Stat(filepath.Join(p, configDir))
		if err != nil {
			continue
		}

		log.Info(ctx, fmt.Sprintf("check config dir %s", configDir))
		if name, ok := repoMgmt[configDir]; ok && cfg.Source.TypeGit == "" {
			cfg.Source.TypeGit = name
		}
		if api, ok := repoAPI[configDir]; ok && cfg.Source.APIURL == "" {
			cfg.Source.APIURL = api
		}
	}
	if p == "/" && cfg.Source.TypeGit == "" && cfg.Source.APIURL == "" {
		return fmt.Errorf("unknown")
	}
	// p = filepath.Clean(filepath.Join(p, ".."))

	usr, err := user.Current()
	if err != nil {
		log.Fatal(ctx, fmt.Sprintf("pkgdash/main cant get info about user: %s", err))
	}

	log.Info(ctx, fmt.Sprintf("try to configure scm source %v", cfg.Source))

	netrcfile := filepath.Join(usr.HomeDir, ".netrc")
	log.Info(ctx, "try to parse netrc file "+netrcfile)
	n, err := netrc.Parse(netrcfile)
	if err != nil {
		log.Error(ctx, "pkgdash/main cant parse .netrc: %s", err)
	}

	log.Info(ctx, "try to configure scm for "+cfg.Source.APIURL)
	if cfg.Source.Username == "" {
		cfg.Source.Username = n.Machine(cfg.Source.APIURL).Get("login")
	}
	if cfg.Source.Password == "" {
		cfg.Source.Password = n.Machine(cfg.Source.APIURL).Get("password")
	}
	return nil
}

func giteaPullRequest(ctx context.Context, log logger.Logger, cfg *configcli.Config, branch string, mods map[string]modules.Update) error {
	envAPIURL := os.Getenv("GITHUB_API_URL")
	envREPOSITORY := os.Getenv("GITHUB_REPOSITORY")
	envTOKEN := os.Getenv("GITHUB_TOKEN")

	var buf []byte
	var err error

	tplTitle, err := template.New("pull_request_title").Parse(cfg.PullRequestTitle)
	if err != nil {
		log.Fatal(ctx, "failed to parse template: %v", err)
	}

	wTitle := bytes.NewBuffer(nil)

	tplBody, err := template.New("pull_request_body").Parse(cfg.PullRequestBody)
	if err != nil {
		log.Fatal(ctx, "failed to parse template: %v", err)
	}

	wBody := bytes.NewBuffer(nil)

	repo, err := git.PlainOpenWithOptions(".", &git.PlainOpenOptions{DetectDotGit: true})
	if err != nil {
		log.Fatal(ctx, "failed to open repo: %v", err)
	}

	if err = repo.FetchContext(ctx, &git.FetchOptions{
		Auth:  &httpauth.BasicAuth{Username: envTOKEN, Password: envTOKEN},
		Force: true,
	}); err != nil && err != git.NoErrAlreadyUpToDate {
		log.Fatal(ctx, "failed to fetch repo: %v", err)
	}

	var headRef *plumbing.Reference
	refIter, err := repo.Branches()
	if err != nil {
		log.Fatal(ctx, "failed to get branches: %v", err)
	}
	for {
		ref, err := refIter.Next()
		if err != nil {
			break
		}
		if ref.Name().String() == branch {
			headRef = ref
			break
		}
	}
	refIter.Close()

	if headRef == nil {
		log.Fatal(ctx, "failed to get repo branch head")
	}

	log.Info(ctx, "repo head %s", headRef)

	wtree, err := repo.Worktree()
	if err != nil {
		log.Fatal(ctx, "failed to get worktree: %v", err)
	}

	type giteaPull struct {
		URL   string `json:"url"`
		Title string `json:"title"`
		Base  struct {
			Ref string `json:"ref"`
		} `json:"base"`
		ID int64 `json:"id"`
	}

	var pulls []*giteaPull
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, envAPIURL+"/repos/"+envREPOSITORY+"/pulls?state=open&token="+envTOKEN, nil)
	if err != nil {
		return err
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Content-Type", "application/json")

	rsp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}

	buf, _ = io.ReadAll(rsp.Body)
	if rsp.StatusCode != http.StatusOK {
		return fmt.Errorf("unknown error: %s", buf)
	}

	if err = json.Unmarshal(buf, &pulls); err != nil {
		log.Fatal(ctx, "failed to decode response %s err: %v", buf, err)
	}

	for path := range mods {
		for _, pull := range pulls {
			if strings.Contains(pull.Title, path) && pull.Base.Ref == branch {
				log.Info(ctx, "skip %s as pr already exists %s", path, pull.URL)
				delete(mods, path)
			}
		}
	}

	for path, mod := range mods {
		wTitle.Reset()
		wBody.Reset()

		log.Info(ctx, "update %s from %s to %s", path, mod.Module.Version, mod.Version)

		log.Info(ctx, "reset worktree")
		if err = wtree.Reset(&git.ResetOptions{Mode: git.HardReset}); err != nil {
			log.Fatal(ctx, "failed to reset repo branch: %v", err)
		}

		if err = wtree.PullContext(ctx, &git.PullOptions{
			Auth:  &httpauth.BasicAuth{Username: envTOKEN, Password: envTOKEN},
			Depth: 1,
			// RemoteURL :
			Force:      true,
			RemoteName: "origin",
		}); err != nil && err != git.NoErrAlreadyUpToDate {
			log.Fatal(ctx, "failed to pull repo: %v", err)
		}

		log.Info(ctx, "checkout ref %s", headRef)
		if err = wtree.Checkout(&git.CheckoutOptions{
			Hash:   headRef.Hash(),
			Branch: plumbing.NewBranchReferenceName(fmt.Sprintf("pkgdash/go_modules/%s-%s", path, mod.Version)),
			Create: true,
			Force:  true,
		}); err != nil {
			log.Fatal(ctx, "failed to checkout tree: %v", err)
		}

		epath, err := exec.LookPath("go")
		if errors.Is(err, exec.ErrDot) {
			err = nil
		}
		if err != nil {
			log.Fatal(ctx, "failed to find go command: %v", err)
		}

		var cmd *exec.Cmd
		var out []byte

		cmd = exec.CommandContext(ctx, epath, "mod", "edit", fmt.Sprintf("-require=%s@%s", path, mod.Version))
		if out, err = cmd.CombinedOutput(); err != nil {
			log.Fatal(ctx, "failed to run go mod edit: %s err: %v", out, err)
		}

		cmd = exec.CommandContext(ctx, epath, "mod", "tidy")
		if out, err = cmd.CombinedOutput(); err != nil {
			log.Fatal(ctx, "failed to run go mod tidy: %s err: %v", out, err)
		}

		log.Info(ctx, "worktree add go.mod")
		if _, err = wtree.Add("go.mod"); err != nil {
			log.Fatal(ctx, "failed to add file: %v", err)
		}

		log.Info(ctx, "worktree add go.sum")
		if _, err = wtree.Add("go.sum"); err != nil {
			log.Fatal(ctx, "failed to add file: %v", err)
		}

		log.Info(ctx, "worktree commit")
		_, err = wtree.Commit(wTitle.String(), &git.CommitOptions{
			Parents: []plumbing.Hash{headRef.Hash()},
			Author: &object.Signature{
				Name:  "gitea-actions",
				Email: "info@unistack.org",
				When:  time.Now(),
			},
		})
		if err != nil {
			log.Fatal(ctx, "failed to commit: %v", err)
		}

		// newref := plumbing.NewHashReference(plumbing.ReferenceName(fmt.Sprintf("refs/heads/pkgdash/go_modules/%s-%s", path, mod.Version)), headRef.Hash())

		/*
			if err = repo.Storer.SetReference(newref); err != nil {
				log.Fatal(ctx, "failed to create repo branch: %v", err)
			}
		*/

		refspec := gitconfig.RefSpec(fmt.Sprintf("+refs/heads/pkgdash/go_modules/%s-%s:refs/heads/pkgdash/go_modules/%s-%s", path, mod.Version, path, mod.Version))

		log.Info(ctx, "try to push refspec %s", refspec)

		if err = repo.PushContext(ctx, &git.PushOptions{
			RefSpecs: []gitconfig.RefSpec{refspec},
			Auth:     &httpauth.BasicAuth{Username: envTOKEN, Password: envTOKEN},
			Force:    true,
		}); err != nil {
			log.Fatal(ctx, "failed to push repo branch: %v", err)
		}

		data := map[string]string{
			"Name":       path,
			"VersionOld": mod.Module.Version,
			"VersionNew": mod.Version,
		}

		if err = tplTitle.Execute(wTitle, data); err != nil {
			log.Fatal(ctx, "failed to execute template: %v", err)
		}
		if err = tplBody.Execute(wBody, data); err != nil {
			log.Fatal(ctx, "failed to execute template: %v", err)
		}

		body := map[string]string{
			"base":  branch,
			"body":  wBody.String(),
			"head":  fmt.Sprintf("pkgdash/go_modules/%s-%s", path, mod.Version),
			"title": wTitle.String(),
		}
		log.Info(ctx, "raw body: %#+v", body)

		buf, err = json.Marshal(body)
		if err != nil {
			return err
		}

		log.Info(ctx, "marshal body: %s", buf)

		req, err := http.NewRequestWithContext(ctx, http.MethodPost, envAPIURL+"/repos/"+envREPOSITORY+"/pulls?token="+envTOKEN, bytes.NewReader(buf))
		if err != nil {
			return err
		}
		req.Header.Add("Accept", "application/json")
		req.Header.Add("Content-Type", "application/json")

		rsp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		if rsp.StatusCode != http.StatusCreated {
			buf, _ = io.ReadAll(rsp.Body)
			return fmt.Errorf("unknown error: %s", buf)
		}

	}

	return nil
}