New Provider: OpsGenie (#11012)

* Importing the OpsGenie SDK

* Adding the goreq dependency

* Initial commit of the OpsGenie / User provider

* Refactoring to return a single client

* Adding an import test / fixing a copy/paste error

* Adding support for OpsGenie docs

* Scaffolding the user documentation for OpsGenie

* Adding a TODO

* Adding the User data source

* Documentation for OpsGenie

* Adding OpsGenie to the internal plugin list

* Adding support for Teams

* Documentation for OpsGenie Teams

* Validation for Teams

* Removing Description for now

* Optional fields for a User: Locale/Timezone

* Removing an implemented TODO

* Running makefmt

* Downloading about half the internet

Someone witty might simply sign this commit with "npm install"

* Adding validation to the user object

* Fixing the docs

* Adding a test creating multiple users

* Prompting for the API Key if it's not specified

* Added a test for multiple users / requested changes

* Fixing the linting
This commit is contained in:
Tom Harvey 2017-01-05 19:25:04 +00:00 committed by Paul Stack
parent f4c5b9fada
commit 05d00a93ce
101 changed files with 13717 additions and 7 deletions

View File

@ -0,0 +1,12 @@
package main
import (
"github.com/hashicorp/terraform/builtin/providers/opsgenie"
"github.com/hashicorp/terraform/plugin"
)
// main starts this binary as a Terraform plugin serving the OpsGenie provider.
func main() {
	opts := plugin.ServeOpts{
		ProviderFunc: opsgenie.Provider,
	}
	plugin.Serve(&opts)
}

View File

@ -78,7 +78,7 @@ func resourceArmCdnProfileCreate(d *schema.ResourceData, meta interface{}) error
return err
}
if read.ID == nil {
return fmt.Errorf("Cannot read CND Profile %s (resource group %s) ID", name, resGroup)
return fmt.Errorf("Cannot read CDN Profile %s (resource group %s) ID", name, resGroup)
}
d.SetId(*read.ID)

View File

@ -0,0 +1,46 @@
package opsgenie
import (
"log"
"golang.org/x/net/context"
"github.com/opsgenie/opsgenie-go-sdk/client"
)
// OpsGenieClient bundles the per-service OpsGenie SDK clients used by the
// provider; it is the value returned from providerConfigure and stored as meta.
type OpsGenieClient struct {
	// apiKey is never assigned anywhere in this file — NOTE(review):
	// presumably vestigial or populated elsewhere; confirm before relying on it.
	apiKey string
	StopContext context.Context

	// Service-specific SDK clients, initialized in Config.Client.
	teams client.OpsGenieTeamClient
	users client.OpsGenieUserClient
}
// Config defines the configuration options for the OpsGenie client
type Config struct {
	// ApiKey is the OpsGenie API key used to authenticate every request.
	ApiKey string
}
// Client returns a new OpsGenie client: it configures the underlying SDK
// client with the API key, then instantiates the Team and User sub-clients.
// Returns an error if either sub-client fails to initialize.
func (c *Config) Client() (*OpsGenieClient, error) {
	opsGenie := new(client.OpsGenieClient)
	opsGenie.SetAPIKey(c.ApiKey)

	// Renamed from "client" so the local no longer shadows the imported
	// "client" package within this function.
	ogClient := OpsGenieClient{}

	log.Printf("[INFO] OpsGenie client configured")

	teamsClient, err := opsGenie.Team()
	if err != nil {
		return nil, err
	}
	ogClient.teams = *teamsClient

	usersClient, err := opsGenie.User()
	if err != nil {
		return nil, err
	}
	ogClient.users = *usersClient

	return &ogClient, nil
}

View File

@ -0,0 +1,66 @@
package opsgenie
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/opsgenie/opsgenie-go-sdk/user"
)
// dataSourceOpsGenieUser describes the opsgenie_user data source: a user is
// looked up by username, and full_name/role are computed from the API.
func dataSourceOpsGenieUser() *schema.Resource {
	attributes := map[string]*schema.Schema{
		"username": {
			Type:     schema.TypeString,
			Required: true,
		},
		"full_name": {
			Type:     schema.TypeString,
			Computed: true,
		},
		"role": {
			Type:     schema.TypeString,
			Computed: true,
		},
	}

	return &schema.Resource{
		Read:   dataSourceOpsGenieUserRead,
		Schema: attributes,
	}
}
// dataSourceOpsGenieUserRead lists all OpsGenie users and selects the one
// whose username matches the configured value, populating id, username,
// full_name and role in state. Errors if no user matches.
func dataSourceOpsGenieUserRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).users

	username := d.Get("username").(string)
	log.Printf("[INFO] Reading OpsGenie user '%s'", username)

	o := user.ListUsersRequest{}
	resp, err := client.List(o)
	if err != nil {
		// BUG FIX: the original returned nil here, silently swallowing the
		// API error and leaving the data source unpopulated.
		return err
	}

	var found *user.GetUserResponse
	for _, u := range resp.Users {
		if u.Username == username {
			// Copy before taking the address so we don't alias the loop variable.
			match := u
			found = &match
			break
		}
	}

	if found == nil {
		return fmt.Errorf("Unable to locate any user with the username: %s", username)
	}

	d.SetId(found.Id)
	d.Set("username", found.Username)
	d.Set("full_name", found.Fullname)
	d.Set("role", found.Role)

	return nil
}

View File

@ -0,0 +1,65 @@
package opsgenie
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccDataSourceOpsGenieUser_Basic creates a user resource and checks the
// data source resolves the same user by username.
func TestAccDataSourceOpsGenieUser_Basic(t *testing.T) {
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccDataSourceOpsGenieUserConfig(rInt),
				Check: resource.ComposeTestCheckFunc(
					testAccDataSourceOpsGenieUser("opsgenie_user.test", "data.opsgenie_user.by_username"),
				),
			},
		},
	})
}
// testAccDataSourceOpsGenieUser returns a check asserting that the data
// source n reports the same id/username/full_name/role as resource src.
func testAccDataSourceOpsGenieUser(src, n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// BUG FIX: guard both state lookups — indexing a missing resource
		// previously produced a nil-pointer panic instead of a test failure.
		srcR, ok := s.RootModule().Resources[src]
		if !ok {
			return fmt.Errorf("Source resource not found in state: %s", src)
		}
		srcA := srcR.Primary.Attributes

		r, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Data source not found in state: %s", n)
		}
		a := r.Primary.Attributes

		if a["id"] == "" {
			return fmt.Errorf("Expected to get a user ID from OpsGenie")
		}

		testAtts := []string{"username", "full_name", "role"}
		for _, att := range testAtts {
			if a[att] != srcA[att] {
				return fmt.Errorf("Expected the user %s to be: %s, but got: %s", att, srcA[att], a[att])
			}
		}

		return nil
	}
}
// testAccDataSourceOpsGenieUserConfig renders an acceptance-test config that
// creates a user and then reads it back through the data source; ri keeps the
// username unique per run.
func testAccDataSourceOpsGenieUserConfig(ri int) string {
	const tmpl = `
resource "opsgenie_user" "test" {
username = "acctest-%d@example.tld"
full_name = "Acceptance Test User"
role = "User"
}
data "opsgenie_user" "by_username" {
username = "${opsgenie_user.test.username}"
}
`
	return fmt.Sprintf(tmpl, ri)
}

View File

@ -0,0 +1,82 @@
package opsgenie
import (
"testing"
"fmt"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
// TestAccOpsGenieTeam_importBasic verifies a minimal team survives an
// import round-trip.
func TestAccOpsGenieTeam_importBasic(t *testing.T) {
	resourceName := "opsgenie_team.test"
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieTeamDestroy,
		Steps: []resource.TestStep{
			{
				// Create the team...
				Config: fmt.Sprintf(testAccOpsGenieTeam_basic, rInt),
			},
			{
				// ...then confirm the imported state matches.
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// TestAccOpsGenieTeam_importWithUser verifies a team with one member
// survives an import round-trip.
func TestAccOpsGenieTeam_importWithUser(t *testing.T) {
	resourceName := "opsgenie_team.test"
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieTeamDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieTeam_withUser, rInt, rInt),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// TestAccOpsGenieTeam_importWithUserComplete verifies a team whose member
// has an explicit role survives an import round-trip.
func TestAccOpsGenieTeam_importWithUserComplete(t *testing.T) {
	resourceName := "opsgenie_team.test"
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieTeamDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieTeam_withUserComplete, rInt, rInt),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

View File

@ -0,0 +1,58 @@
package opsgenie
import (
"testing"
"fmt"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
// TestAccOpsGenieUser_importBasic verifies a minimal user survives an
// import round-trip.
func TestAccOpsGenieUser_importBasic(t *testing.T) {
	resourceName := "opsgenie_user.test"
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieUserDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieUser_basic, rInt),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// TestAccOpsGenieUser_importComplete verifies a user with all optional
// fields set survives an import round-trip.
func TestAccOpsGenieUser_importComplete(t *testing.T) {
	resourceName := "opsgenie_user.test"
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieUserDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieUser_complete, rInt),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

View File

@ -0,0 +1,42 @@
package opsgenie
import (
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Provider represents a resource provider in Terraform. The api_key is
// required but may come from the OPSGENIE_API_KEY environment variable.
func Provider() terraform.ResourceProvider {
	provider := schema.Provider{
		Schema: map[string]*schema.Schema{
			"api_key": {
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("OPSGENIE_API_KEY", nil),
			},
		},

		DataSourcesMap: map[string]*schema.Resource{
			"opsgenie_user": dataSourceOpsGenieUser(),
		},

		ResourcesMap: map[string]*schema.Resource{
			"opsgenie_team": resourceOpsGenieTeam(),
			"opsgenie_user": resourceOpsGenieUser(),
		},

		ConfigureFunc: providerConfigure,
	}

	return &provider
}
// providerConfigure builds the shared OpsGenie client from the provider's
// api_key configuration; the result becomes meta for all CRUD functions.
func providerConfigure(data *schema.ResourceData) (interface{}, error) {
	log.Println("[INFO] Initializing OpsGenie client")

	apiKey := data.Get("api_key").(string)
	config := Config{ApiKey: apiKey}

	return config.Client()
}

View File

@ -0,0 +1,37 @@
package opsgenie
import (
"os"
"testing"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// testAccProviders maps provider names to the provider instances under test.
var testAccProviders map[string]terraform.ResourceProvider

// testAccProvider is the single OpsGenie provider shared by the acceptance tests.
var testAccProvider *schema.Provider

// init wires the opsgenie provider into the map consumed by resource.Test.
func init() {
	testAccProvider = Provider().(*schema.Provider)
	testAccProviders = map[string]terraform.ResourceProvider{
		"opsgenie": testAccProvider,
	}
}
// TestProvider ensures the provider schema passes Terraform's internal validation.
func TestProvider(t *testing.T) {
	provider := Provider().(*schema.Provider)
	if err := provider.InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}
// TestProvider_impl is a compile-time assertion that Provider() satisfies
// the terraform.ResourceProvider interface.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
// testAccPreCheck fails fast when the credentials needed by the acceptance
// tests are not present in the environment.
func testAccPreCheck(t *testing.T) {
	if os.Getenv("OPSGENIE_API_KEY") == "" {
		t.Fatal("OPSGENIE_API_KEY must be set for acceptance tests")
	}
}

View File

@ -0,0 +1,231 @@
package opsgenie
import (
"log"
"fmt"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/opsgenie/opsgenie-go-sdk/team"
"regexp"
)
// resourceOpsGenieTeam wires the CRUD functions and schema for the
// opsgenie_team resource. Teams have a validated name and an optional
// list of members, each with a username and a role (default "user").
func resourceOpsGenieTeam() *schema.Resource {
	memberSchema := &schema.Resource{
		Schema: map[string]*schema.Schema{
			"username": {
				Type:     schema.TypeString,
				Required: true,
			},
			"role": {
				Type:         schema.TypeString,
				Optional:     true,
				Default:      "user",
				ValidateFunc: validateOpsGenieTeamRole,
			},
		},
	}

	return &schema.Resource{
		Create: resourceOpsGenieTeamCreate,
		Read:   resourceOpsGenieTeamRead,
		Update: resourceOpsGenieTeamUpdate,
		Delete: resourceOpsGenieTeamDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"name": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateOpsGenieTeamName,
			},
			"member": {
				Type:     schema.TypeList,
				Optional: true,
				Elem:     memberSchema,
			},
		},
	}
}
// resourceOpsGenieTeamCreate creates the team, then resolves its server-side
// id with a follow-up Get (the create response does not carry the id).
func resourceOpsGenieTeamCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).teams

	name := d.Get("name").(string)
	createRequest := team.CreateTeamRequest{
		Name:    name,
		Members: expandOpsGenieTeamMembers(d),
	}

	log.Printf("[INFO] Creating OpsGenie team '%s'", name)
	createResponse, err := client.Create(createRequest)
	if err != nil {
		return err
	}
	if err := checkOpsGenieResponse(createResponse.Code, createResponse.Status); err != nil {
		return err
	}

	getResponse, err := client.Get(team.GetTeamRequest{Name: name})
	if err != nil {
		return err
	}

	d.SetId(getResponse.Id)
	return resourceOpsGenieTeamRead(d, meta)
}
// resourceOpsGenieTeamRead refreshes state for a team. It first lists all
// teams as an existence probe (Get on a deleted team errors rather than
// reporting "not found" cleanly), then fetches full details by id.
func resourceOpsGenieTeamRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).teams

	listResponse, err := client.List(team.ListTeamsRequest{})
	if err != nil {
		return err
	}

	// Loop variable renamed from "team": the original shadowed the imported
	// "team" package and took the address of the loop variable.
	exists := false
	for _, t := range listResponse.Teams {
		if t.Id == d.Id() {
			exists = true
			break
		}
	}

	if !exists {
		d.SetId("")
		log.Printf("[INFO] Team %q not found. Removing from state", d.Get("name").(string))
		return nil
	}

	getRequest := team.GetTeamRequest{
		Id: d.Id(),
	}
	getResponse, err := client.Get(getRequest)
	if err != nil {
		return err
	}

	d.Set("name", getResponse.Name)
	d.Set("member", flattenOpsGenieTeamMembers(getResponse.Members))

	return nil
}
// resourceOpsGenieTeamUpdate pushes the current name and member list for the
// team to OpsGenie and validates the API response code.
func resourceOpsGenieTeamUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).teams

	name := d.Get("name").(string)
	updateRequest := team.UpdateTeamRequest{
		Id:      d.Id(),
		Name:    name,
		Members: expandOpsGenieTeamMembers(d),
	}

	log.Printf("[INFO] Updating OpsGenie team '%s'", name)
	updateResponse, err := client.Update(updateRequest)
	if err != nil {
		return err
	}

	return checkOpsGenieResponse(updateResponse.Code, updateResponse.Status)
}
// resourceOpsGenieTeamDelete removes the team identified by the resource id.
func resourceOpsGenieTeamDelete(d *schema.ResourceData, meta interface{}) error {
	log.Printf("[INFO] Deleting OpsGenie team '%s'", d.Get("name").(string))
	client := meta.(*OpsGenieClient).teams

	_, err := client.Delete(team.DeleteTeamRequest{Id: d.Id()})
	return err
}
// flattenOpsGenieTeamMembers converts SDK team members into the
// username/role maps Terraform stores in state.
func flattenOpsGenieTeamMembers(input []team.Member) []interface{} {
	members := make([]interface{}, 0, len(input))
	for _, m := range input {
		members = append(members, map[string]interface{}{
			"username": m.User,
			"role":     m.Role,
		})
	}
	return members
}
// expandOpsGenieTeamMembers reads the configured "member" blocks into SDK
// team.Member values. Returns an empty slice when no members are configured.
func expandOpsGenieTeamMembers(d *schema.ResourceData) []team.Member {
	input := d.Get("member").([]interface{})

	// Cleanup: the original checked input == nil only after already using it
	// in make(); ranging over a nil slice is a no-op, so no guard is needed.
	members := make([]team.Member, 0, len(input))
	for _, v := range input {
		config := v.(map[string]interface{})
		members = append(members, team.Member{
			User: config["username"].(string),
			Role: config["role"].(string),
		})
	}

	return members
}
// validateOpsGenieTeamName enforces OpsGenie team-name rules: only
// alphanumerics and underscores, and fewer than 100 characters.
func validateOpsGenieTeamName(v interface{}, k string) (ws []string, errors []error) {
	name := v.(string)

	validChars := regexp.MustCompile(`^[a-zA-Z0-9_]+$`)
	if !validChars.MatchString(name) {
		errors = append(errors, fmt.Errorf(
			"only alpha numeric characters and underscores are allowed in %q: %q", k, name))
	}

	if len(name) >= 100 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 100 characters: %q %d", k, name, len(name)))
	}

	return
}
// validateOpsGenieTeamRole restricts member roles to "admin" or "user",
// compared case-insensitively.
func validateOpsGenieTeamRole(v interface{}, k string) (ws []string, errors []error) {
	role := strings.ToLower(v.(string))
	switch role {
	case "admin", "user":
		// accepted
	default:
		errors = append(errors, fmt.Errorf("OpsGenie Team Role can only be 'Admin' or 'User'"))
	}
	return
}

View File

@ -0,0 +1,270 @@
package opsgenie
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/opsgenie/opsgenie-go-sdk/team"
)
// TestAccOpsGenieTeamName_validation covers valid names, invalid characters,
// and both sides of the 100-character limit.
func TestAccOpsGenieTeamName_validation(t *testing.T) {
	testCases := []struct {
		Value    string
		ErrCount int
	}{
		{Value: "hello-world", ErrCount: 1},
		{Value: "hello_world", ErrCount: 0},
		{Value: "helloWorld", ErrCount: 0},
		{Value: "helloworld12", ErrCount: 0},
		{Value: "hello@world", ErrCount: 1},
		{Value: "qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd3324120", ErrCount: 0},
		{Value: "qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd33241202qfvbdsbvipqdbwsbddbdcwqffewsqwcdw21ddwqwd33241202", ErrCount: 0},
		{Value: "qfvbdsbvipqdbwsbddbdcwqfjjfewsqwcdw21ddwqwd3324120qfvbdsbvipqdbwsbddbdcwqfjjfewsqwcdw21ddwqwd3324120", ErrCount: 1},
	}

	for _, tc := range testCases {
		_, errs := validateOpsGenieTeamName(tc.Value, "opsgenie_team")
		if len(errs) != tc.ErrCount {
			t.Fatalf("Expected the OpsGenie Team Name to trigger a validation error: %v", errs)
		}
	}
}
// TestAccOpsGenieTeamRole_validation checks that only admin/user roles pass.
func TestAccOpsGenieTeamRole_validation(t *testing.T) {
	testCases := []struct {
		Value    string
		ErrCount int
	}{
		{Value: "admin", ErrCount: 0},
		{Value: "user", ErrCount: 0},
		{Value: "custom", ErrCount: 1},
	}

	for _, tc := range testCases {
		_, errs := validateOpsGenieTeamRole(tc.Value, "opsgenie_team")
		if len(errs) != tc.ErrCount {
			t.Fatalf("Expected the OpsGenie Team Role to trigger a validation error")
		}
	}
}
// TestAccOpsGenieTeam_basic creates a minimal team and checks it exists.
func TestAccOpsGenieTeam_basic(t *testing.T) {
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieTeamDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieTeam_basic, rInt),
				Check: resource.ComposeTestCheckFunc(
					testCheckOpsGenieTeamExists("opsgenie_team.test"),
				),
			},
		},
	})
}
// TestAccOpsGenieTeam_withUser creates a team containing one member.
func TestAccOpsGenieTeam_withUser(t *testing.T) {
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieTeamDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieTeam_withUser, rInt, rInt),
				Check: resource.ComposeTestCheckFunc(
					testCheckOpsGenieTeamExists("opsgenie_team.test"),
				),
			},
		},
	})
}
// TestAccOpsGenieTeam_withUserComplete creates a team whose member has an
// explicit role.
func TestAccOpsGenieTeam_withUserComplete(t *testing.T) {
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieTeamDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieTeam_withUserComplete, rInt, rInt),
				Check: resource.ComposeTestCheckFunc(
					testCheckOpsGenieTeamExists("opsgenie_team.test"),
				),
			},
		},
	})
}
// TestAccOpsGenieTeam_withMultipleUsers creates a team containing two members.
func TestAccOpsGenieTeam_withMultipleUsers(t *testing.T) {
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieTeamDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieTeam_withMultipleUsers, rInt, rInt, rInt),
				Check: resource.ComposeTestCheckFunc(
					testCheckOpsGenieTeamExists("opsgenie_team.test"),
				),
			},
		},
	})
}
// testCheckOpsGenieTeamDestroy asserts that every opsgenie_team left in
// state has actually been removed from OpsGenie.
func testCheckOpsGenieTeamDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*OpsGenieClient).teams

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "opsgenie_team" {
			continue
		}

		// Get is expected to fail for a deleted team; a non-nil result
		// means the team survived the destroy.
		result, _ := client.Get(team.GetTeamRequest{Id: rs.Primary.Attributes["id"]})
		if result != nil {
			return fmt.Errorf("Team still exists:\n%#v", result)
		}
	}

	return nil
}
// testCheckOpsGenieTeamExists returns a check asserting the named team in
// state can be fetched from the OpsGenie API.
func testCheckOpsGenieTeamExists(name string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Ensure we have enough information in state to look up in API
		rs, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("Not found: %s", name)
		}

		id := rs.Primary.Attributes["id"]
		// Renamed from "name": the original shadowed the function parameter.
		teamName := rs.Primary.Attributes["name"]

		client := testAccProvider.Meta().(*OpsGenieClient).teams
		// Also treat a transport error as "does not exist" instead of
		// silently discarding it like the original did.
		result, err := client.Get(team.GetTeamRequest{Id: id})
		if err != nil || result == nil {
			return fmt.Errorf("Bad: Team %q (name: %q) does not exist", id, teamName)
		}

		return nil
	}
}
// testAccOpsGenieTeam_basic is a minimal team config; takes one fmt.Sprintf
// %d argument to make the team name unique per test run.
var testAccOpsGenieTeam_basic = `
resource "opsgenie_team" "test" {
name = "acctest%d"
}
`

// testAccOpsGenieTeam_withUser creates a user and a team referencing that
// user as a member (role defaults to "user"); takes two %d arguments.
var testAccOpsGenieTeam_withUser = `
resource "opsgenie_user" "test" {
username = "acctest-%d@example.tld"
full_name = "Acceptance Test User"
role = "User"
}
resource "opsgenie_team" "test" {
name = "acctest%d"
member {
username = "${opsgenie_user.test.username}"
}
}
`

// testAccOpsGenieTeam_withUserComplete is like _withUser but sets the member
// role explicitly; takes two %d arguments.
var testAccOpsGenieTeam_withUserComplete = `
resource "opsgenie_user" "test" {
username = "acctest-%d@example.tld"
full_name = "Acceptance Test User"
role = "User"
}
resource "opsgenie_team" "test" {
name = "acctest%d"
member {
username = "${opsgenie_user.test.username}"
role = "user"
}
}
`

// testAccOpsGenieTeam_withMultipleUsers creates two users and a team with
// both as members; takes three %d arguments.
var testAccOpsGenieTeam_withMultipleUsers = `
resource "opsgenie_user" "first" {
username = "acctest-1-%d@example.tld"
full_name = "First Acceptance Test User"
role = "User"
}
resource "opsgenie_user" "second" {
username = "acctest-2-%d@example.tld"
full_name = "Second Acceptance Test User"
role = "User"
}
resource "opsgenie_team" "test" {
name = "acctest%d"
member {
username = "${opsgenie_user.first.username}"
}
member {
username = "${opsgenie_user.second.username}"
}
}
`

View File

@ -0,0 +1,211 @@
package opsgenie
import (
"log"
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/opsgenie/opsgenie-go-sdk/user"
)
// resourceOpsGenieUser wires the CRUD functions and schema for the
// opsgenie_user resource. username forces re-creation; locale and timezone
// are optional with OpsGenie's documented defaults.
func resourceOpsGenieUser() *schema.Resource {
	userSchema := map[string]*schema.Schema{
		"username": {
			Type:         schema.TypeString,
			ForceNew:     true,
			Required:     true,
			ValidateFunc: validateOpsGenieUserUsername,
		},
		"full_name": {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validateOpsGenieUserFullName,
		},
		"role": {
			Type:         schema.TypeString,
			Required:     true,
			ValidateFunc: validateOpsGenieUserRole,
		},
		"locale": {
			Type:     schema.TypeString,
			Optional: true,
			Default:  "en_US",
		},
		"timezone": {
			Type:     schema.TypeString,
			Optional: true,
			Default:  "America/New_York",
		},
	}

	return &schema.Resource{
		Create: resourceOpsGenieUserCreate,
		Read:   resourceOpsGenieUserRead,
		Update: resourceOpsGenieUserUpdate,
		Delete: resourceOpsGenieUserDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: userSchema,
	}
}
// resourceOpsGenieUserCreate creates the user, then resolves its server-side
// id with a follow-up Get by username (the create response has no id).
func resourceOpsGenieUserCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).users

	createRequest := user.CreateUserRequest{
		Username: d.Get("username").(string),
		Fullname: d.Get("full_name").(string),
		Role:     d.Get("role").(string),
		Locale:   d.Get("locale").(string),
		Timezone: d.Get("timezone").(string),
	}

	log.Printf("[INFO] Creating OpsGenie user '%s'", createRequest.Username)
	createResponse, err := client.Create(createRequest)
	if err != nil {
		return err
	}
	if err := checkOpsGenieResponse(createResponse.Code, createResponse.Status); err != nil {
		return err
	}

	getResponse, err := client.Get(user.GetUserRequest{Username: createRequest.Username})
	if err != nil {
		return err
	}

	d.SetId(getResponse.Id)
	return resourceOpsGenieUserRead(d, meta)
}
// resourceOpsGenieUserRead refreshes state for a user. It first lists all
// users as an existence probe (Get on a deleted user errors rather than
// reporting "not found" cleanly), then fetches full details by id.
func resourceOpsGenieUserRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).users

	listResponse, err := client.List(user.ListUsersRequest{})
	if err != nil {
		return err
	}

	// Loop variable renamed from "user": the original shadowed the imported
	// "user" package and took the address of the loop variable.
	exists := false
	for _, u := range listResponse.Users {
		if u.Id == d.Id() {
			exists = true
			break
		}
	}

	if !exists {
		d.SetId("")
		log.Printf("[INFO] User %q not found. Removing from state", d.Get("username").(string))
		return nil
	}

	getRequest := user.GetUserRequest{
		Id: d.Id(),
	}
	getResponse, err := client.Get(getRequest)
	if err != nil {
		return err
	}

	d.Set("username", getResponse.Username)
	d.Set("full_name", getResponse.Fullname)
	d.Set("role", getResponse.Role)
	d.Set("locale", getResponse.Locale)
	d.Set("timezone", getResponse.Timezone)

	return nil
}
// resourceOpsGenieUserUpdate pushes the current configuration for the user
// to OpsGenie and validates the API response code. Username is ForceNew and
// therefore not part of the update request.
func resourceOpsGenieUserUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*OpsGenieClient).users

	username := d.Get("username").(string)
	log.Printf("[INFO] Updating OpsGenie user '%s'", username)

	updateRequest := user.UpdateUserRequest{
		Id:       d.Id(),
		Fullname: d.Get("full_name").(string),
		Role:     d.Get("role").(string),
		Locale:   d.Get("locale").(string),
		Timezone: d.Get("timezone").(string),
	}

	updateResponse, err := client.Update(updateRequest)
	if err != nil {
		return err
	}

	return checkOpsGenieResponse(updateResponse.Code, updateResponse.Status)
}
// resourceOpsGenieUserDelete removes the user identified by the resource id.
func resourceOpsGenieUserDelete(d *schema.ResourceData, meta interface{}) error {
	log.Printf("[INFO] Deleting OpsGenie user '%s'", d.Get("username").(string))
	client := meta.(*OpsGenieClient).users

	_, err := client.Delete(user.DeleteUserRequest{Id: d.Id()})
	return err
}
// validateOpsGenieUserUsername rejects usernames of 100 characters or more.
func validateOpsGenieUserUsername(v interface{}, k string) (ws []string, errors []error) {
	username := v.(string)
	if len(username) >= 100 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 100 characters: %q %d", k, username, len(username)))
	}
	return
}
// validateOpsGenieUserFullName rejects full names of 512 characters or more.
func validateOpsGenieUserFullName(v interface{}, k string) (ws []string, errors []error) {
	fullName := v.(string)
	if len(fullName) >= 512 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 512 characters: %q %d", k, fullName, len(fullName)))
	}
	return
}
// validateOpsGenieUserRole rejects role names of 512 characters or more.
// (OpsGenie supports custom roles, so only the length is constrained.)
func validateOpsGenieUserRole(v interface{}, k string) (ws []string, errors []error) {
	role := v.(string)
	if len(role) >= 512 {
		errors = append(errors, fmt.Errorf("%q cannot be longer than 512 characters: %q %d", k, role, len(role)))
	}
	return
}

View File

@ -0,0 +1,206 @@
package opsgenie
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/opsgenie/opsgenie-go-sdk/user"
)
// TestAccOpsGenieUserUsername_validation covers both sides of the
// 100-character username limit.
func TestAccOpsGenieUserUsername_validation(t *testing.T) {
	testCases := []struct {
		Value    string
		ErrCount int
	}{
		{Value: "hello", ErrCount: 0},
		{Value: acctest.RandString(99), ErrCount: 0},
		{Value: acctest.RandString(100), ErrCount: 1},
	}

	for _, tc := range testCases {
		_, errs := validateOpsGenieUserUsername(tc.Value, "opsgenie_team")
		if len(errs) != tc.ErrCount {
			t.Fatalf("Expected the OpsGenie User Username Validation to trigger a validation error: %v", errs)
		}
	}
}
// TestAccOpsGenieUserFullName_validation covers both sides of the
// 512-character full-name limit.
func TestAccOpsGenieUserFullName_validation(t *testing.T) {
	testCases := []struct {
		Value    string
		ErrCount int
	}{
		{Value: "hello", ErrCount: 0},
		{Value: acctest.RandString(100), ErrCount: 0},
		{Value: acctest.RandString(511), ErrCount: 0},
		{Value: acctest.RandString(512), ErrCount: 1},
	}

	for _, tc := range testCases {
		_, errs := validateOpsGenieUserFullName(tc.Value, "opsgenie_team")
		if len(errs) != tc.ErrCount {
			t.Fatalf("Expected the OpsGenie User Full Name Validation to trigger a validation error: %v", errs)
		}
	}
}
// TestAccOpsGenieUserRole_validation covers both sides of the
// 512-character role limit.
func TestAccOpsGenieUserRole_validation(t *testing.T) {
	testCases := []struct {
		Value    string
		ErrCount int
	}{
		{Value: "hello", ErrCount: 0},
		{Value: acctest.RandString(100), ErrCount: 0},
		{Value: acctest.RandString(511), ErrCount: 0},
		{Value: acctest.RandString(512), ErrCount: 1},
	}

	for _, tc := range testCases {
		_, errs := validateOpsGenieUserRole(tc.Value, "opsgenie_team")
		if len(errs) != tc.ErrCount {
			t.Fatalf("Expected the OpsGenie User Role Validation to trigger a validation error: %v", errs)
		}
	}
}
// TestAccOpsGenieUser_basic creates a minimal user and checks it exists.
func TestAccOpsGenieUser_basic(t *testing.T) {
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieUserDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieUser_basic, rInt),
				Check: resource.ComposeTestCheckFunc(
					testCheckOpsGenieUserExists("opsgenie_user.test"),
				),
			},
		},
	})
}
// TestAccOpsGenieUser_complete creates a user with all optional fields set
// and checks it exists.
func TestAccOpsGenieUser_complete(t *testing.T) {
	rInt := acctest.RandInt()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testCheckOpsGenieUserDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccOpsGenieUser_complete, rInt),
				Check: resource.ComposeTestCheckFunc(
					testCheckOpsGenieUserExists("opsgenie_user.test"),
				),
			},
		},
	})
}
// testCheckOpsGenieUserDestroy asserts that every opsgenie_user left in
// state has actually been removed from OpsGenie.
func testCheckOpsGenieUserDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*OpsGenieClient).users

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "opsgenie_user" {
			continue
		}

		// Get is expected to fail for a deleted user; a non-nil result
		// means the user survived the destroy.
		result, _ := client.Get(user.GetUserRequest{Id: rs.Primary.Attributes["id"]})
		if result != nil {
			return fmt.Errorf("User still exists:\n%#v", result)
		}
	}

	return nil
}
// testCheckOpsGenieUserExists returns a check asserting the named user in
// state can be fetched from the OpsGenie API.
func testCheckOpsGenieUserExists(name string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Ensure we have enough information in state to look up in API
		rs, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("Not found: %s", name)
		}

		id := rs.Primary.Attributes["id"]
		username := rs.Primary.Attributes["username"]

		client := testAccProvider.Meta().(*OpsGenieClient).users
		// Also treat a transport error as "does not exist" instead of
		// silently discarding it like the original did.
		result, err := client.Get(user.GetUserRequest{Id: id})
		if err != nil || result == nil {
			return fmt.Errorf("Bad: User %q (username: %q) does not exist", id, username)
		}

		return nil
	}
}
// testAccOpsGenieUser_basic is a minimal user config; takes one fmt.Sprintf
// %d argument to keep the username unique per test run.
var testAccOpsGenieUser_basic = `
resource "opsgenie_user" "test" {
username = "acctest-%d@example.tld"
full_name = "Acceptance Test User"
role = "User"
}
`

// testAccOpsGenieUser_complete additionally exercises the optional
// locale/timezone fields; takes one %d argument.
var testAccOpsGenieUser_complete = `
resource "opsgenie_user" "test" {
username = "acctest-%d@example.tld"
full_name = "Acceptance Test User"
role = "User"
locale = "en_GB"
timezone = "Etc/GMT"
}
`

View File

@ -0,0 +1,14 @@
package opsgenie
import (
"fmt"
"net/http"
)
// checkOpsGenieResponse maps a non-200 OpsGenie API response to an error;
// a 200 response yields nil.
func checkOpsGenieResponse(code int, status string) error {
	if code != http.StatusOK {
		return fmt.Errorf("Unexpected Status Code '%d', Response '%s'", code, status)
	}
	return nil
}

View File

@ -41,6 +41,7 @@ import (
nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad"
nullprovider "github.com/hashicorp/terraform/builtin/providers/null"
openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack"
opsgenieprovider "github.com/hashicorp/terraform/builtin/providers/opsgenie"
packetprovider "github.com/hashicorp/terraform/builtin/providers/packet"
pagerdutyprovider "github.com/hashicorp/terraform/builtin/providers/pagerduty"
postgresqlprovider "github.com/hashicorp/terraform/builtin/providers/postgresql"
@ -106,6 +107,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{
"nomad": nomadprovider.Provider,
"null": nullprovider.Provider,
"openstack": openstackprovider.Provider,
"opsgenie": opsgenieprovider.Provider,
"packet": packetprovider.Provider,
"pagerduty": pagerdutyprovider.Provider,
"postgresql": postgresqlprovider.Provider,

24
vendor/github.com/cihub/seelog/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,24 @@
Copyright (c) 2012, Cloud Instruments Co., Ltd. <info@cin.io>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Cloud Instruments Co., Ltd. nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

116
vendor/github.com/cihub/seelog/README.markdown generated vendored Normal file
View File

@ -0,0 +1,116 @@
Seelog
=======
Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages.
It is natively written in the [Go](http://golang.org/) programming language.
[![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest)
Features
------------------
* Xml configuring to be able to change logger parameters without recompilation
* Changing configurations on the fly without app restart
* Possibility to set different log configurations for different project files and functions
* Adjustable message formatting
* Simultaneous log output to multiple streams
* Choosing logger priority strategy to minimize performance hit
* Different output writers
* Console writer
* File writer
* Buffered writer (Chunk writer)
* Rolling log writer (Logging with rotation)
* SMTP writer
* Others... (See [Wiki](https://github.com/cihub/seelog/wiki))
* Log message wrappers (JSON, XML, etc.)
* Global variables and functions for easy usage in standalone apps
* Functions for flexible usage in libraries
Quick-start
-----------
```go
package main
import log "github.com/cihub/seelog"
func main() {
defer log.Flush()
log.Info("Hello from Seelog!")
}
```
Installation
------------
If you don't have the Go development environment installed, visit the
[Getting Started](http://golang.org/doc/install.html) document and follow the instructions. Once you're ready, execute the following command:
```
go get -u github.com/cihub/seelog
```
*IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get')
Documentation
---------------
Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki
Examples
---------------
Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples)
Issues
---------------
Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues
Changelog
---------------
* **v2.6** : Config using code and custom formatters
* Configuration using code in addition to xml (All internal receiver/dispatcher/logger types are now exported).
* Custom formatters. Check [wiki](https://github.com/cihub/seelog/wiki/Custom-formatters)
* Bugfixes and internal improvements.
* **v2.5** : Interaction with other systems. Part 2: custom receivers
* Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers)
* Added 'LoggerFromCustomReceiver'
* Added 'LoggerFromWriterWithMinLevelAndFormat'
* Added 'LoggerFromCustomReceiver'
* Added 'LoggerFromParamConfigAs...'
* **v2.4** : Interaction with other systems. Part 1: wrapping seelog
* Added configurable caller stack skip logic
* Added 'SetAdditionalStackDepth' to 'LoggerInterface'
* **v2.3** : Rethinking 'rolling' receiver
* Reimplemented 'rolling' receiver
* Added 'Max rolls' feature for 'rolling' receiver with type='date'
* Fixed 'rolling' receiver issue: renaming on Windows
* **v2.2** : go1.0 compatibility point [go1.0 tag]
* Fixed internal bugs
* Added 'ANSI n [;k]' format identifier: %EscN
* Made current release go1 compatible
* **v2.1** : Some new features
* Rolling receiver archiving option.
* Added format identifier: %Line
* Smtp: added paths to PEM files directories
* Added format identifier: %FuncShort
* Warn, Error and Critical methods now return an error
* **v2.0** : Second major release. BREAKING CHANGES.
* Support of binaries with stripped symbols
* Added log strategy: adaptive
* Critical message now forces Flush()
* Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast
* Added receiver: conn (network connection writer)
* BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle
* Bug fixes
* **v1.0** : Initial release. Features:
* Xml config
* Changing configurations on the fly without app restart
* Constraints and exceptions
* Formatting
* Log strategies: sync, async loop, async timer
* Receivers: buffered, console, file, rolling, smtp

198
vendor/github.com/cihub/seelog/archive/archive.go generated vendored Normal file
View File

@ -0,0 +1,198 @@
package archive
import (
"archive/tar"
"archive/zip"
"fmt"
"io"
"io/ioutil"
"os"
"time"
"github.com/cihub/seelog/archive/gzip"
)
// Reader is the interface for reading files from an archive.
// NextFile advances to the next file in the archive and returns its name;
// subsequent Read calls return that file's contents.
type Reader interface {
	NextFile() (name string, err error)
	io.Reader
}

// ReadCloser is the interface that groups Reader with the Close method.
type ReadCloser interface {
	Reader
	io.Closer
}

// Writer is the interface for writing files to an archived format.
// NextFile starts a new entry with the given name and file info; subsequent
// Write calls supply that file's contents.
type Writer interface {
	NextFile(name string, fi os.FileInfo) error
	io.Writer
}

// WriteCloser is the interface that groups Writer with the Close method.
type WriteCloser interface {
	Writer
	io.Closer
}
// nopCloser adapts a Reader into a ReadCloser whose Close does nothing.
type nopCloser struct{ Reader }

// Close implements io.Closer as a no-op; it always returns nil.
func (nopCloser) Close() error { return nil }

// NopCloser returns a ReadCloser with a no-op Close method wrapping the
// provided Reader r.
func NopCloser(r Reader) ReadCloser {
	return nopCloser{Reader: r}
}
// Copy copies from src to dest until either EOF is reached on src or an error
// occurs.
//
// When the archive format of src matches that of dst, Copy streams the files
// directly into dst. Otherwise, copy buffers the contents to disk to compute
// headers before writing to dst.
func Copy(dst Writer, src Reader) error {
	// Fast paths: when both ends speak the same format, stream entries
	// straight through without staging them on disk.
	switch src := src.(type) {
	case tarReader:
		if dst, ok := dst.(tarWriter); ok {
			return copyTar(dst, src)
		}
	case zipReader:
		if dst, ok := dst.(zipWriter); ok {
			return copyZip(dst, src)
		}
	// Switch on concrete type because gzip has no special methods
	case *gzip.Reader:
		if dst, ok := dst.(*gzip.Writer); ok {
			_, err := io.Copy(dst, src)
			return err
		}
	}
	// Mixed formats: fall back to buffering each file on disk so complete
	// headers (sizes) are known before writing to dst.
	return copyBuffer(dst, src)
}
// copyBuffer copies files from src to dst one at a time, staging each file's
// contents in a temporary file on disk so that dst can be given a complete
// header (including the size) before the contents are written.
//
// Fix: uses io.SeekStart instead of the deprecated os.SEEK_SET constant
// (same value, the io constant is the documented replacement since Go 1.7).
func copyBuffer(dst Writer, src Reader) (err error) {
	const defaultFileMode = 0666

	buf, err := ioutil.TempFile("", "archive_copy_buffer")
	if err != nil {
		return err
	}
	defer os.Remove(buf.Name()) // Do not care about failure removing temp
	defer buf.Close()           // Do not care about failure closing temp
	for {
		// Handle the next file
		name, err := src.NextFile()
		switch err {
		case io.EOF: // Done copying
			return nil
		default: // Failed to write: bail out
			return err
		case nil: // Proceed below
		}

		// Buffer the file
		if _, err := io.Copy(buf, src); err != nil {
			return fmt.Errorf("buffer to disk: %v", err)
		}

		// Seek to the start of the file for full file copy
		if _, err := buf.Seek(0, io.SeekStart); err != nil {
			return err
		}

		// Set desired file permissions
		if err := os.Chmod(buf.Name(), defaultFileMode); err != nil {
			return err
		}
		fi, err := buf.Stat()
		if err != nil {
			return err
		}

		// Write the buffered file
		if err := dst.NextFile(name, fi); err != nil {
			return err
		}
		if _, err := io.Copy(dst, buf); err != nil {
			return fmt.Errorf("copy to dst: %v", err)
		}

		// Reset the temp file so the next entry starts from an empty buffer.
		if err := buf.Truncate(0); err != nil {
			return err
		}
		if _, err := buf.Seek(0, io.SeekStart); err != nil {
			return err
		}
	}
}
// tarReader is the subset of archive/tar's *tar.Reader that Copy uses to
// detect a tar-format source.
type tarReader interface {
	Next() (*tar.Header, error)
	io.Reader
}

// tarWriter is the subset of archive/tar's *tar.Writer that Copy uses to
// detect a tar-format destination.
type tarWriter interface {
	WriteHeader(hdr *tar.Header) error
	io.Writer
}

// zipReader is the method set Copy uses to detect a zip-format source.
type zipReader interface {
	Files() []*zip.File
}

// zipWriter is the subset of archive/zip's *zip.Writer that Copy uses to
// detect a zip-format destination.
type zipWriter interface {
	CreateHeader(fh *zip.FileHeader) (io.Writer, error)
}
// copyTar streams every regular-file entry from r into w, reusing the
// original tar headers. Directory entries are skipped.
func copyTar(w tarWriter, r tarReader) error {
	for {
		hdr, err := r.Next()
		if err == io.EOF {
			// Archive exhausted: done.
			return nil
		}
		if err != nil {
			return err
		}

		// Skip directories; only file contents are copied.
		if hdr.FileInfo().IsDir() {
			continue
		}

		if err = w.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err = io.Copy(w, r); err != nil {
			return err
		}
	}
}
// copyZip copies every file entry in r into zw, stopping at the first error.
func copyZip(zw zipWriter, r zipReader) error {
	for _, entry := range r.Files() {
		if err := copyZipFile(zw, entry); err != nil {
			return err
		}
	}
	return nil
}
// copyZipFile copies a single zip entry f into zw. The entry's header is
// reused, except that the modification time is reset to the current time.
func copyZipFile(zw zipWriter, f *zip.File) error {
	rc, err := f.Open()
	if err != nil {
		return err
	}
	defer rc.Close() // Read-only

	// Copy the header by value so the source entry is not mutated.
	hdr := f.FileHeader
	// NOTE(review): SetModTime is deprecated in modern Go (the Modified
	// field replaced it); kept as-is since this is vendored code.
	hdr.SetModTime(time.Now())
	w, err := zw.CreateHeader(&hdr)
	if err != nil {
		return err
	}
	_, err = io.Copy(w, rc)
	return err
}

178
vendor/github.com/cihub/seelog/archive/archive_test.go generated vendored Normal file
View File

@ -0,0 +1,178 @@
package archive_test
import (
"bytes"
"fmt"
"io"
"testing"
"github.com/cihub/seelog/archive"
"github.com/cihub/seelog/archive/gzip"
"github.com/cihub/seelog/archive/tar"
"github.com/cihub/seelog/archive/zip"
"github.com/cihub/seelog/io/iotest"
)
// Archive type names used to drive the copy test matrix.
const (
	gzipType = "gzip"
	tarType  = "tar"
	zipType  = "zip"
)

var types = []string{gzipType, tarType, zipType}

// file is an in-memory fixture: a file name plus its full contents.
type file struct {
	name     string
	contents []byte
}

var (
	// oneFile is the single-entry fixture (usable with every archive type).
	oneFile = []file{
		{
			name:     "file1",
			contents: []byte("This is a single log."),
		},
	}
	// twoFiles is the multi-entry fixture (not usable with gzip, which can
	// hold only a single file).
	twoFiles = []file{
		{
			name:     "file1",
			contents: []byte("This is a log."),
		},
		{
			name:     "file2",
			contents: []byte("This is another log."),
		},
	}
)

// testCase describes one src-format/dst-format copy and its input files.
type testCase struct {
	srcType, dstType string
	in               []file
}
// copyTests builds the full matrix of (source type, destination type,
// fixture) cases, keyed by a human-readable description.
func copyTests() map[string]testCase {
	// types X types X files
	tests := make(map[string]testCase, len(types)*len(types)*2)
	for _, from := range types {
		for _, to := range types {
			key := fmt.Sprintf("%s to %s: one file", from, to)
			tests[key] = testCase{srcType: from, dstType: to, in: oneFile}

			// gzip does not handle more than one file
			if from == gzipType || to == gzipType {
				continue
			}
			key = fmt.Sprintf("%s to %s: two files", from, to)
			tests[key] = testCase{srcType: from, dstType: to, in: twoFiles}
		}
	}
	return tests
}
// TestCopy round-trips every fixture through every (src, dst) archive-type
// pair: it writes the fixture into a src-format archive, copies it with
// archive.Copy into a dst-format archive, then reads the result back and
// compares entry names and contents.
func TestCopy(t *testing.T) {
	srcb, dstb := new(bytes.Buffer), new(bytes.Buffer)
	for tname, tt := range copyTests() {
		// Reset buffers between tests
		srcb.Reset()
		dstb.Reset()

		// Last file name (needed for gzip.NewReader)
		var fname string

		// Seed the src
		srcw := writer(t, tname, srcb, tt.srcType)
		for _, f := range tt.in {
			// NOTE(review): NextFile's error is ignored here.
			srcw.NextFile(f.name, iotest.FileInfo(t, f.contents))
			mustCopy(t, tname, srcw, bytes.NewReader(f.contents))
			fname = f.name
		}
		mustClose(t, tname, srcw)

		// Perform the copy
		srcr := reader(t, tname, srcb, tt.srcType, fname)
		dstw := writer(t, tname, dstb, tt.dstType)
		if err := archive.Copy(dstw, srcr); err != nil {
			t.Fatalf("%s: %v", tname, err)
		}
		srcr.Close() // Read-only
		mustClose(t, tname, dstw)

		// Read back dst to confirm our expectations
		dstr := reader(t, tname, dstb, tt.dstType, fname)
		for _, want := range tt.in {
			buf := new(bytes.Buffer)
			name, err := dstr.NextFile()
			if err != nil {
				t.Fatalf("%s: %v", tname, err)
			}
			mustCopy(t, tname, buf, dstr)
			got := file{
				name:     name,
				contents: buf.Bytes(),
			}
			switch {
			case got.name != want.name:
				t.Errorf("%s: got file %q but want file %q",
					tname, got.name, want.name)
			case !bytes.Equal(got.contents, want.contents):
				t.Errorf("%s: mismatched contents in %q: got %q but want %q",
					tname, got.name, got.contents, want.contents)
			}
		}
		dstr.Close()
	}
}
// writer constructs an archive.WriteCloser of the requested type writing to
// w, failing the test on an unknown type.
func writer(t *testing.T, tname string, w io.Writer, atype string) archive.WriteCloser {
	switch atype {
	case gzipType:
		return gzip.NewWriter(w)
	case tarType:
		return tar.NewWriter(w)
	case zipType:
		return zip.NewWriter(w)
	}
	t.Fatalf("%s: unrecognized archive type: %s", tname, atype)
	// Unreachable in practice; keeps the compiler satisfied about returns.
	panic("execution continued after (*testing.T).Fatalf")
}
// reader constructs an archive.ReadCloser of the requested type reading from
// buf, failing the test on an unknown type. fname is only needed by gzip,
// whose wrapper does not learn the file name from the stream.
func reader(t *testing.T, tname string, buf *bytes.Buffer, atype string, fname string) archive.ReadCloser {
	switch atype {
	case gzipType:
		gr, err := gzip.NewReader(buf, fname)
		if err != nil {
			t.Fatalf("%s: %v", tname, err)
		}
		return gr
	case tarType:
		return archive.NopCloser(tar.NewReader(buf))
	case zipType:
		zr, err := zip.NewReader(
			bytes.NewReader(buf.Bytes()),
			int64(buf.Len()))
		if err != nil {
			t.Fatalf("%s: new zip reader: %v", tname, err)
		}
		return archive.NopCloser(zr)
	}
	t.Fatalf("%s: unrecognized archive type: %s", tname, atype)
	// Unreachable in practice; keeps the compiler satisfied about returns.
	panic("execution continued after (*testing.T).Fatalf")
}
// mustCopy copies src into dst, failing the test on any error.
func mustCopy(t *testing.T, tname string, dst io.Writer, src io.Reader) {
	if _, err := io.Copy(dst, src); err != nil {
		t.Fatalf("%s: copy: %v", tname, err)
	}
}

// mustClose closes c, failing the test on any error.
func mustClose(t *testing.T, tname string, c io.Closer) {
	if err := c.Close(); err != nil {
		t.Fatalf("%s: close: %v", tname, err)
	}
}

64
vendor/github.com/cihub/seelog/archive/gzip/gzip.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Package gzip implements reading and writing of gzip format compressed files.
// See the compress/gzip package for more details.
package gzip
import (
"compress/gzip"
"fmt"
"io"
"os"
)
// Reader is an io.Reader that can be read to retrieve uncompressed data from a
// gzip-format compressed file.
type Reader struct {
gzip.Reader
name string
isEOF bool
}
// NewReader creates a new Reader reading the given reader.
func NewReader(r io.Reader, name string) (*Reader, error) {
gr, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
return &Reader{
Reader: *gr,
name: name,
}, nil
}
// NextFile returns the file name. Calls subsequent to the first call will
// return EOF.
func (r *Reader) NextFile() (name string, err error) {
if r.isEOF {
return "", io.EOF
}
r.isEOF = true
return r.name, nil
}
// Writer is an io.WriteCloser. Writes to a Writer are compressed and written to w.
type Writer struct {
gzip.Writer
name string
noMoreFiles bool
}
// NextFile never returns a next file, and should not be called more than once.
func (w *Writer) NextFile(name string, _ os.FileInfo) error {
if w.noMoreFiles {
return fmt.Errorf("gzip: only accepts one file: already received %q and now %q", w.name, name)
}
w.noMoreFiles = true
w.name = name
return nil
}
// NewWriter returns a new Writer. Writes to the returned writer are compressed
// and written to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{Writer: *gzip.NewWriter(w)}
}

72
vendor/github.com/cihub/seelog/archive/tar/tar.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
package tar
import (
"archive/tar"
"io"
"os"
)
// Reader provides sequential access to the contents of a tar archive.
type Reader struct {
tar.Reader
}
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
return &Reader{Reader: *tar.NewReader(r)}
}
// NextFile advances to the next file in the tar archive.
func (r *Reader) NextFile() (name string, err error) {
hdr, err := r.Next()
if err != nil {
return "", err
}
return hdr.Name, nil
}
// Writer provides sequential writing of a tar archive in POSIX.1 format.
type Writer struct {
tar.Writer
closers []io.Closer
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{Writer: *tar.NewWriter(w)}
}
// NewWriteMultiCloser creates a new Writer writing to w that also closes all
// closers in order on close.
func NewWriteMultiCloser(w io.WriteCloser, closers ...io.Closer) *Writer {
return &Writer{
Writer: *tar.NewWriter(w),
closers: closers,
}
}
// NextFile computes and writes a header and prepares to accept the file's
// contents.
func (w *Writer) NextFile(name string, fi os.FileInfo) error {
if name == "" {
name = fi.Name()
}
hdr, err := tar.FileInfoHeader(fi, name)
if err != nil {
return err
}
hdr.Name = name
return w.WriteHeader(hdr)
}
// Close closes the tar archive and all other closers, flushing any unwritten
// data to the underlying writer.
func (w *Writer) Close() error {
err := w.Writer.Close()
for _, c := range w.closers {
if cerr := c.Close(); cerr != nil && err == nil {
err = cerr
}
}
return err
}

104
vendor/github.com/cihub/seelog/archive/tar/tar_test.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
package tar_test
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"github.com/cihub/seelog/archive/tar"
"github.com/cihub/seelog/io/iotest"
)
// file is an in-memory fixture: a file name plus its full contents.
type file struct {
	name     string
	contents []byte
}

// tarTests maps a test name to the files the tar archive should contain.
var tarTests = map[string]struct{ want []file }{
	"one file": {
		want: []file{
			{
				name:     "file",
				contents: []byte("I am a log file"),
			},
		},
	},
	"multiple files": {
		want: []file{
			{
				name:     "file1",
				contents: []byte("I am log file 1"),
			},
			{
				name:     "file2",
				contents: []byte("I am log file 2"),
			},
		},
	},
}
// TestWriterAndReader writes each fixture as a tar file, then reads it back
// and compares names and contents.
func TestWriterAndReader(t *testing.T) {
	for tname, tt := range tarTests {
		// NOTE(review): cleanup is deferred inside the loop, so temp files
		// are only removed when the whole test returns — acceptable here,
		// but a per-iteration helper would release them sooner.
		f, cleanup := iotest.TempFile(t)
		defer cleanup()
		writeFiles(t, f, tname, tt.want)
		readFiles(t, f, tname, tt.want)
	}
}
// writeFiles iterates through the files we want and writes them as a tarred
// file.
func writeFiles(t *testing.T, f *os.File, tname string, want []file) {
	w := tar.NewWriter(f)
	defer w.Close()

	// Write tarred files
	for _, fwant := range want {
		fi := iotest.FileInfo(t, fwant.contents)

		// Write the file
		err := w.NextFile(fwant.name, fi)
		switch err {
		case io.EOF:
			// NOTE(review): this break exits only the switch, not the loop;
			// presumably the intent was to stop writing on EOF — confirm.
			break
		default:
			t.Fatalf("%s: write header for next file: %v", tname, err)
		case nil: // Proceed below
		}
		if _, err := io.Copy(w, bytes.NewReader(fwant.contents)); err != nil {
			t.Fatalf("%s: copy to writer: %v", tname, err)
		}
	}
}
// readFiles iterates through tarred files and ensures they are the same.
func readFiles(t *testing.T, f *os.File, tname string, want []file) {
r := tar.NewReader(f)
for _, fwant := range want {
fname, err := r.NextFile()
switch err {
case io.EOF:
return
default:
t.Fatalf("%s: read header for next file: %v", tname, err)
case nil: // Proceed below
}
if fname != fwant.name {
t.Fatalf("%s: incorrect file name: got %q but want %q", tname, fname, fwant.name)
continue
}
gotContents, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("%s: read file: %v", tname, err)
}
if !bytes.Equal(gotContents, fwant.contents) {
t.Errorf("%s: %q = %q but want %q", tname, fname, gotContents, fwant.contents)
}
}
}

89
vendor/github.com/cihub/seelog/archive/zip/zip.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
package zip
import (
"archive/zip"
"io"
"os"
)
// Reader provides sequential access to the contents of a zip archive.
type Reader struct {
zip.Reader
unread []*zip.File
rc io.ReadCloser
}
// NewReader returns a new Reader reading from r, which is assumed to have the
// given size in bytes.
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
zr, err := zip.NewReader(r, size)
if err != nil {
return nil, err
}
return &Reader{Reader: *zr}, nil
}
// NextFile advances to the next file in the zip archive.
func (r *Reader) NextFile() (name string, err error) {
// Initialize unread
if r.unread == nil {
r.unread = r.Files()[:]
}
// Close previous file
if r.rc != nil {
r.rc.Close() // Read-only
}
if len(r.unread) == 0 {
return "", io.EOF
}
// Open and return next unread
f := r.unread[0]
name, r.unread = f.Name, r.unread[1:]
r.rc, err = f.Open()
if err != nil {
return "", err
}
return name, nil
}
func (r *Reader) Read(p []byte) (n int, err error) {
return r.rc.Read(p)
}
// Files returns the full list of files in the zip archive.
func (r *Reader) Files() []*zip.File {
return r.File
}
// Writer provides sequential writing of a zip archive.1 format.
type Writer struct {
zip.Writer
w io.Writer
}
// NewWriter returns a new Writer writing to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{Writer: *zip.NewWriter(w)}
}
// NextFile computes and writes a header and prepares to accept the file's
// contents.
func (w *Writer) NextFile(name string, fi os.FileInfo) error {
if name == "" {
name = fi.Name()
}
hdr, err := zip.FileInfoHeader(fi)
if err != nil {
return err
}
hdr.Name = name
w.w, err = w.CreateHeader(hdr)
return err
}
func (w *Writer) Write(p []byte) (n int, err error) {
return w.w.Write(p)
}

99
vendor/github.com/cihub/seelog/archive/zip/zip_test.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
package zip_test
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"github.com/cihub/seelog/archive/zip"
"github.com/cihub/seelog/io/iotest"
)
// zipTests maps a test name to the files (name -> contents) the zip archive
// should contain.
var zipTests = map[string]struct{ want map[string][]byte }{
	"one file": {
		want: map[string][]byte{
			"file": []byte("I am a log file"),
		},
	},
	"multiple files": {
		want: map[string][]byte{
			"file1": []byte("I am log file 1"),
			"file2": []byte("I am log file 2"),
		},
	},
}
// TestWriterAndReader writes each fixture as a zip file, then reads it back
// and compares names and contents.
func TestWriterAndReader(t *testing.T) {
	for tname, tt := range zipTests {
		// NOTE(review): cleanup is deferred inside the loop, so temp files
		// are only removed when the whole test returns.
		f, cleanup := iotest.TempFile(t)
		defer cleanup()
		writeFiles(t, f, tname, tt.want)
		readFiles(t, f, tname, tt.want)
	}
}
// writeFiles iterates through the files we want and writes them as a zipped
// file.
func writeFiles(t *testing.T, f *os.File, tname string, want map[string][]byte) {
	w := zip.NewWriter(f)
	defer w.Close()

	// Write zipped files
	for fname, fbytes := range want {
		fi := iotest.FileInfo(t, fbytes)

		// Write the file
		err := w.NextFile(fname, fi)
		switch err {
		case io.EOF:
			// NOTE(review): this break exits only the switch, not the loop;
			// presumably the intent was to stop writing on EOF — confirm.
			break
		default:
			t.Fatalf("%s: write header for next file: %v", tname, err)
		case nil: // Proceed below
		}
		if _, err := io.Copy(w, bytes.NewReader(fbytes)); err != nil {
			t.Fatalf("%s: copy to writer: %v", tname, err)
		}
	}
}
// readFiles iterates through zipped files and ensures they are the same.
func readFiles(t *testing.T, f *os.File, tname string, want map[string][]byte) {
// Get zip Reader
fi, err := f.Stat()
if err != nil {
t.Fatalf("%s: stat zipped file: %v", tname, err)
}
r, err := zip.NewReader(f, fi.Size())
if err != nil {
t.Fatalf("%s: %v", tname, err)
}
for {
fname, err := r.NextFile()
switch err {
case io.EOF:
return
default:
t.Fatalf("%s: read header for next file: %v", tname, err)
case nil: // Proceed below
}
wantBytes, ok := want[fname]
if !ok {
t.Errorf("%s: read unwanted file: %v", tname, fname)
continue
}
gotBytes, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("%s: read file: %v", tname, err)
}
if !bytes.Equal(gotBytes, wantBytes) {
t.Errorf("%s: %q = %q but want %q", tname, fname, gotBytes, wantBytes)
}
}
}

View File

@ -0,0 +1,129 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"math"
"time"
)
var (
	// adaptiveLoggerMaxInterval bounds the maximum flush interval a caller
	// may configure for an adaptive logger.
	adaptiveLoggerMaxInterval = time.Minute
	// adaptiveLoggerMaxCriticalMsgCount bounds the configurable critical
	// message count.
	adaptiveLoggerMaxCriticalMsgCount = uint32(1000)
)

// asyncAdaptiveLogger represents asynchronous adaptive logger which acts like
// an async timer logger, but its interval depends on the current message count
// in the queue.
//
// Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c:
// I = m + (C - Min(c, C)) / C * (M - m)
type asyncAdaptiveLogger struct {
	asyncLogger
	minInterval      time.Duration
	criticalMsgCount uint32
	maxInterval      time.Duration
}
// NewAsyncAdaptiveLogger creates a new asynchronous adaptive logger whose
// flush interval varies between minInterval and maxInterval depending on how
// close the queue length is to criticalMsgCount.
// (The original doc comment misnamed this function NewAsyncLoopLogger.)
func NewAsyncAdaptiveLogger(
	config *logConfig,
	minInterval time.Duration,
	maxInterval time.Duration,
	criticalMsgCount uint32) (*asyncAdaptiveLogger, error) {

	if minInterval <= 0 {
		return nil, errors.New("async adaptive logger min interval should be > 0")
	}

	if maxInterval > adaptiveLoggerMaxInterval {
		return nil, fmt.Errorf("async adaptive logger max interval should be <= %s",
			adaptiveLoggerMaxInterval)
	}

	// criticalMsgCount is unsigned, so `<= 0` could only ever mean `== 0`.
	if criticalMsgCount == 0 {
		return nil, errors.New("async adaptive logger critical msg count should be > 0")
	}

	if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount {
		// Bug fix: the original printed adaptiveLoggerMaxInterval with %s
		// here instead of the max critical message count.
		return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %d",
			adaptiveLoggerMaxCriticalMsgCount)
	}

	asnAdaptiveLogger := new(asyncAdaptiveLogger)

	asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config)
	asnAdaptiveLogger.minInterval = minInterval
	asnAdaptiveLogger.maxInterval = maxInterval
	asnAdaptiveLogger.criticalMsgCount = criticalMsgCount

	// Background goroutine runs until the logger is closed.
	go asnAdaptiveLogger.processQueue()

	return asnAdaptiveLogger, nil
}
// processItem blocks until the queue has an element or the logger is closed,
// then processes one queued message. It returns whether the logger was closed
// and an item count used to compute the next adaptive sleep interval.
func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) {
	asnAdaptiveLogger.queueHasElements.L.Lock()
	defer asnAdaptiveLogger.queueHasElements.L.Unlock()

	// Wait under the lock until there is work or the logger shuts down.
	for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.Closed() {
		asnAdaptiveLogger.queueHasElements.Wait()
	}

	if asnAdaptiveLogger.Closed() {
		return true, asnAdaptiveLogger.msgQueue.Len()
	}

	asnAdaptiveLogger.processQueueElement()
	// NOTE(review): processQueueElement already removes the processed
	// message, so Len()-1 can yield -1 when the queue drains — confirm
	// whether the extra -1 is intended.
	return false, asnAdaptiveLogger.msgQueue.Len() - 1
}
// calcAdaptiveInterval computes the sleep interval for the current queue
// length using:
//
//	I = m + (C - Min(c, C)) / C * (M - m)
//
// where m/M are the min/max intervals, C the critical count, c the message count.
func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration {
	critical := float64(asnAdaptiveLogger.criticalMsgCount)
	slack := (critical - math.Min(float64(msgCount), critical)) / critical
	span := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval)
	return asnAdaptiveLogger.minInterval + time.Duration(slack*span)
}
// processQueue drains the message queue until the logger is closed, sleeping
// between items for an interval derived from the current queue length.
func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() {
	for !asnAdaptiveLogger.Closed() {
		closed, itemCount := asnAdaptiveLogger.processItem()

		if closed {
			break
		}

		// Longer queues yield shorter intervals (down to minInterval at the
		// critical count). Note: time.After allocates a timer per iteration.
		interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount)

		<-time.After(interval)
	}
}

142
vendor/github.com/cihub/seelog/behavior_asynclogger.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"container/list"
"fmt"
"sync"
)
// MaxQueueSize is the critical number of messages in the queue that result in an immediate flush.
const (
	MaxQueueSize = 10000
)

// msgQueueItem is a single queued log message with its level and context.
type msgQueueItem struct {
	level   LogLevel
	context LogContextInterface
	message fmt.Stringer
}

// asyncLogger represents common data for all asynchronous loggers
type asyncLogger struct {
	commonLogger
	msgQueue         *list.List // FIFO of msgQueueItem values
	queueHasElements *sync.Cond // signaled when the queue gains elements or the logger closes
}
// newAsyncLogger creates a new asynchronous logger
func newAsyncLogger(config *logConfig) *asyncLogger {
	asnLogger := new(asyncLogger)

	asnLogger.msgQueue = list.New()
	asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex))

	asnLogger.commonLogger = *newCommonLogger(config, asnLogger)

	return asnLogger
}

// innerLog enqueues the message instead of processing it synchronously; a
// background goroutine drains the queue.
func (asnLogger *asyncLogger) innerLog(
	level LogLevel,
	context LogContextInterface,
	message fmt.Stringer) {

	asnLogger.addMsgToQueue(level, context, message)
}
// Close flushes all pending messages, flushes and closes the root dispatcher,
// marks the logger closed, and wakes any goroutines blocked on the queue
// condition so they can observe the closed state.
func (asnLogger *asyncLogger) Close() {
	asnLogger.m.Lock()
	defer asnLogger.m.Unlock()

	if !asnLogger.Closed() {
		asnLogger.flushQueue(true)
		asnLogger.config.RootDispatcher.Flush()

		if err := asnLogger.config.RootDispatcher.Close(); err != nil {
			reportInternalError(err)
		}

		asnLogger.closedM.Lock()
		asnLogger.closed = true
		asnLogger.closedM.Unlock()

		// Wake queue waiters so they do not block forever.
		asnLogger.queueHasElements.Broadcast()
	}
}

// Flush processes every currently queued message and flushes the root
// dispatcher, leaving the logger open.
func (asnLogger *asyncLogger) Flush() {
	asnLogger.m.Lock()
	defer asnLogger.m.Unlock()

	if !asnLogger.Closed() {
		asnLogger.flushQueue(true)
		asnLogger.config.RootDispatcher.Flush()
	}
}
// flushQueue processes every message currently in the queue. When lockNeeded
// is true the queue lock is acquired first; callers that already hold the
// lock pass false.
func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) {
	if lockNeeded {
		asnLogger.queueHasElements.L.Lock()
		defer asnLogger.queueHasElements.L.Unlock()
	}

	for asnLogger.msgQueue.Len() > 0 {
		asnLogger.processQueueElement()
	}
}

// processQueueElement pops the oldest queued message (if any) and hands it to
// the synchronous processing path.
func (asnLogger *asyncLogger) processQueueElement() {
	if asnLogger.msgQueue.Len() == 0 {
		return
	}
	// The original misleadingly named this local "backElement" even though
	// it takes the queue's front.
	front := asnLogger.msgQueue.Front()
	item, _ := front.Value.(msgQueueItem)
	asnLogger.processLogMsg(item.level, item.message, item.context)
	asnLogger.msgQueue.Remove(front)
}
// addMsgToQueue appends a message to the queue, first flushing synchronously
// if the queue has reached MaxQueueSize. Enqueueing on a closed logger is
// reported as an internal error instead.
func (asnLogger *asyncLogger) addMsgToQueue(
	level LogLevel,
	context LogContextInterface,
	message fmt.Stringer) {

	if !asnLogger.Closed() {
		asnLogger.queueHasElements.L.Lock()
		defer asnLogger.queueHasElements.L.Unlock()

		if asnLogger.msgQueue.Len() >= MaxQueueSize {
			fmt.Printf("Seelog queue overflow: more than %v messages in the queue. Flushing.\n", MaxQueueSize)
			asnLogger.flushQueue(false)
		}

		queueItem := msgQueueItem{level, context, message}

		asnLogger.msgQueue.PushBack(queueItem)

		// Wake any goroutine waiting for queue elements.
		asnLogger.queueHasElements.Broadcast()
	} else {
		err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message)
		reportInternalError(err)
	}
}

View File

@ -0,0 +1,69 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// asyncLoopLogger represents asynchronous logger which processes the log queue in
// a 'for' loop
type asyncLoopLogger struct {
	asyncLogger
}

// NewAsyncLoopLogger creates a new asynchronous loop logger
func NewAsyncLoopLogger(config *logConfig) *asyncLoopLogger {
	asnLoopLogger := new(asyncLoopLogger)

	asnLoopLogger.asyncLogger = *newAsyncLogger(config)

	// Background goroutine runs until the logger is closed.
	go asnLoopLogger.processQueue()

	return asnLoopLogger
}
// processItem blocks until the queue has an element or the logger is closed,
// then processes one queued message. It reports whether the logger was closed.
func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) {
	asnLoopLogger.queueHasElements.L.Lock()
	defer asnLoopLogger.queueHasElements.L.Unlock()

	// Wait under the lock until there is work or the logger shuts down.
	for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.Closed() {
		asnLoopLogger.queueHasElements.Wait()
	}

	if asnLoopLogger.Closed() {
		return true
	}

	asnLoopLogger.processQueueElement()
	return false
}
// processQueue drains queued messages one at a time until the logger closes.
func (asnLoopLogger *asyncLoopLogger) processQueue() {
	for !asnLoopLogger.Closed() {
		if asnLoopLogger.processItem() {
			break
		}
	}
}

View File

@ -0,0 +1,82 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"time"
)
// asyncTimerLogger represents an asynchronous logger which processes the log
// queue once per 'interval'.
type asyncTimerLogger struct {
	asyncLogger
	interval time.Duration // delay between queue-processing iterations
}
// NewAsyncTimerLogger creates a new asynchronous timer logger that processes
// the log queue once per 'interval' and starts its background goroutine.
// Returns an error if interval is not positive.
// (The original comment was a copy-paste of NewAsyncLoopLogger's.)
func NewAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) {
	if interval <= 0 {
		return nil, errors.New("async logger interval should be > 0")
	}
	asnTimerLogger := new(asyncTimerLogger)
	asnTimerLogger.asyncLogger = *newAsyncLogger(config)
	asnTimerLogger.interval = interval
	go asnTimerLogger.processQueue()
	return asnTimerLogger, nil
}
// processItem blocks until the queue has a message or the logger is closed,
// then processes one queue element. It reports true when the logger has been
// closed, signalling processQueue to stop.
func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) {
	asnTimerLogger.queueHasElements.L.Lock()
	defer asnTimerLogger.queueHasElements.L.Unlock()
	// Wait on the condition variable until a message arrives; the loop
	// guards against spurious wakeups.
	for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.Closed() {
		asnTimerLogger.queueHasElements.Wait()
	}
	// Re-check after waking: the wakeup may have been caused by closing.
	if asnTimerLogger.Closed() {
		return true
	}
	asnTimerLogger.processQueueElement()
	return false
}
// processQueue processes one queued item per 'interval' until the logger is
// closed.
func (asnTimerLogger *asyncTimerLogger) processQueue() {
	for !asnTimerLogger.Closed() {
		closed := asnTimerLogger.processItem()
		if closed {
			break
		}
		// NOTE(review): time.After allocates a fresh timer every iteration;
		// harmless at typical intervals, but a reused time.Timer would avoid it.
		<-time.After(asnTimerLogger.interval)
	}
}

75
vendor/github.com/cihub/seelog/behavior_synclogger.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// syncLogger performs logging in the same goroutine where 'Trace/Debug/...'
// func was called, i.e. messages are dispatched synchronously with no queue.
type syncLogger struct {
	commonLogger
}
// NewSyncLogger creates a new synchronous logger. The logger registers
// itself as the inner-log handler of its embedded commonLogger.
func NewSyncLogger(config *logConfig) *syncLogger {
	logger := &syncLogger{}
	logger.commonLogger = *newCommonLogger(config, logger)
	return logger
}
// innerLog dispatches a single message on the caller's goroutine.
func (sl *syncLogger) innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer) {
	sl.processLogMsg(level, message, context)
}
// Close shuts down the root dispatcher and marks the logger closed.
// Closing an already-closed logger is a no-op.
func (sl *syncLogger) Close() {
	sl.m.Lock()
	defer sl.m.Unlock()
	if sl.Closed() {
		return
	}
	if err := sl.config.RootDispatcher.Close(); err != nil {
		reportInternalError(err)
	}
	sl.closedM.Lock()
	sl.closed = true
	sl.closedM.Unlock()
}
// Flush forwards a flush request to the root dispatcher, unless the
// logger has already been closed.
func (sl *syncLogger) Flush() {
	sl.m.Lock()
	defer sl.m.Unlock()
	if sl.Closed() {
		return
	}
	sl.config.RootDispatcher.Flush()
}

212
vendor/github.com/cihub/seelog/cfg_config.go generated vendored Normal file
View File

@ -0,0 +1,212 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"os"
)
// LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml.
func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) {
	f, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	conf, err := configFromReader(f)
	if err != nil {
		return nil, err
	}
	return createLoggerFromFullConfig(conf)
}
// LoggerFromConfigAsBytes creates a logger with config from bytes stream. Bytes should contain valid seelog xml.
func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) {
	reader := bytes.NewBuffer(data)
	conf, err := configFromReader(reader)
	if err != nil {
		return nil, err
	}
	return createLoggerFromFullConfig(conf)
}
// LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml.
func LoggerFromConfigAsString(data string) (LoggerInterface, error) {
	raw := []byte(data)
	return LoggerFromConfigAsBytes(raw)
}
// LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) {
	f, err := os.Open(fileName)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	conf, err := configFromReaderWithConfig(f, parserParams)
	if err != nil {
		return nil, err
	}
	return createLoggerFromFullConfig(conf)
}
// LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) {
	reader := bytes.NewBuffer(data)
	conf, err := configFromReaderWithConfig(reader, parserParams)
	if err != nil {
		return nil, err
	}
	return createLoggerFromFullConfig(conf)
}
// LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options.
// See 'CfgParseParams' comments.
func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) {
	raw := []byte(data)
	return LoggerFromParamConfigAsBytes(raw, parserParams)
}
// LoggerFromWriterWithMinLevel is a shortcut for
// LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat).
func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) {
	return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)
}
// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the
// receiver with minimal level = minLevel and with the specified format.
//
// Every message at minLevel or above is formatted and written to 'output'.
// Can be called for usage with non-Seelog systems.
func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) {
	levelRange, err := NewMinMaxConstraints(minLevel, CriticalLvl)
	if err != nil {
		return nil, err
	}
	fmtr, err := NewFormatter(format)
	if err != nil {
		return nil, err
	}
	root, err := NewSplitDispatcher(fmtr, []interface{}{output})
	if err != nil {
		return nil, err
	}
	conf, err := newFullLoggerConfig(levelRange, []*LogLevelException{}, root, syncloggerTypeFromString, nil, nil)
	if err != nil {
		return nil, err
	}
	return createLoggerFromFullConfig(conf)
}
// LoggerFromXMLDecoder creates a logger with config read from an XML decoder,
// starting at a specific node. It should contain valid seelog xml, except for
// the root node name.
func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) {
	conf, err := configFromXMLDecoder(xmlParser, rootNode)
	if err != nil {
		return nil, err
	}
	return createLoggerFromFullConfig(conf)
}
// LoggerFromCustomReceiver creates a proxy logger that forwards every message
// to the given CustomReceiver without additional formatting (the '%Msg'
// format is used).
//
// Check CustomReceiver, RegisterReceiver for additional info.
//
// Unlike RegisterReceiver — which only takes a value to obtain its
// reflect.Type and then re-instantiates it on every config reload — this
// function uses the passed instance as-is, directly attaching it to the
// dispatcher tree. Fill it with data and perform any initialization before
// calling, and nothing will be lost.
//
// Note also that CustomReceiver.AfterParse is only invoked when a receiver is
// instantiated by the config parser; it is never called here, so if the
// receiver is used solely for proxying, its AfterParse may be left empty.
func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) {
	levelRange, err := NewMinMaxConstraints(TraceLvl, CriticalLvl)
	if err != nil {
		return nil, err
	}
	recvDispatcher, err := NewCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{})
	if err != nil {
		return nil, err
	}
	root, err := NewSplitDispatcher(msgonlyformatter, []interface{}{recvDispatcher})
	if err != nil {
		return nil, err
	}
	conf, err := newFullLoggerConfig(levelRange, []*LogLevelException{}, root, syncloggerTypeFromString, nil, nil)
	if err != nil {
		return nil, err
	}
	return createLoggerFromFullConfig(conf)
}
// CloneLogger builds a fresh logger of the same concrete kind, sharing the
// configuration of the given one. Unknown logger types yield an error.
func CloneLogger(logger LoggerInterface) (LoggerInterface, error) {
	switch l := logger.(type) {
	case *syncLogger:
		return NewSyncLogger(l.commonLogger.config), nil
	case *asyncLoopLogger:
		return NewAsyncLoopLogger(l.commonLogger.config), nil
	case *asyncTimerLogger:
		// Return a literal nil interface on error (never a typed nil pointer).
		clone, err := NewAsyncTimerLogger(l.commonLogger.config, l.interval)
		if err != nil {
			return nil, err
		}
		return clone, nil
	case *asyncAdaptiveLogger:
		clone, err := NewAsyncAdaptiveLogger(l.commonLogger.config, l.minInterval, l.maxInterval, l.criticalMsgCount)
		if err != nil {
			return nil, err
		}
		return clone, nil
	default:
		return nil, fmt.Errorf("unexpected type %T", l)
	}
}

61
vendor/github.com/cihub/seelog/cfg_errors.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
)
// Sentinel errors for config-tree node validation.
var (
	errNodeMustHaveChildren   = errors.New("node must have children")
	errNodeCannotHaveChildren = errors.New("node cannot have children")
)

// unexpectedChildElementError reports a config node containing a child
// element that the parser does not recognize.
type unexpectedChildElementError struct {
	baseError
}

// newUnexpectedChildElementError wraps 'msg' in an unexpectedChildElementError.
func newUnexpectedChildElementError(msg string) *unexpectedChildElementError {
	custmsg := "Unexpected child element: " + msg
	return &unexpectedChildElementError{baseError{message: custmsg}}
}

// missingArgumentError reports a config output node missing a required attribute.
type missingArgumentError struct {
	baseError
}

// newMissingArgumentError names the node and the attribute it lacks.
func newMissingArgumentError(nodeName, attrName string) *missingArgumentError {
	custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute"
	return &missingArgumentError{baseError{message: custmsg}}
}

// unexpectedAttributeError reports a config node carrying an attribute the
// parser does not recognize.
type unexpectedAttributeError struct {
	baseError
}

// newUnexpectedAttributeError names the node and the offending attribute.
func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError {
	custmsg := nodeName + " has unexpected attribute: " + attr
	return &unexpectedAttributeError{baseError{message: custmsg}}
}

141
vendor/github.com/cihub/seelog/cfg_logconfig.go generated vendored Normal file
View File

@ -0,0 +1,141 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
)
// loggerTypeFromString enumerates the logger behaviors selectable by name in
// a config file.
type loggerTypeFromString uint8

// Logger behavior identifiers; defaultloggerTypeFromString is used when the
// config does not specify a type.
const (
	syncloggerTypeFromString = iota
	asyncLooploggerTypeFromString
	asyncTimerloggerTypeFromString
	adaptiveLoggerTypeFromString
	defaultloggerTypeFromString = asyncLooploggerTypeFromString
)

// String names for logger types as they appear in config files.
const (
	syncloggerTypeFromStringStr       = "sync"
	asyncloggerTypeFromStringStr      = "asyncloop"
	asyncTimerloggerTypeFromStringStr = "asynctimer"
	adaptiveLoggerTypeFromStringStr   = "adaptive"
)

// asyncTimerLoggerData represents specific data for async timer logger
type asyncTimerLoggerData struct {
	AsyncInterval uint32 // processing interval, as parsed from config
}

// adaptiveLoggerData represents specific data for adaptive timer logger
type adaptiveLoggerData struct {
	MinInterval      uint32
	MaxInterval      uint32
	CriticalMsgCount uint32
}

// loggerTypeToStringRepresentations maps each logger type to its config-file name.
var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{
	syncloggerTypeFromString:       syncloggerTypeFromStringStr,
	asyncLooploggerTypeFromString:  asyncloggerTypeFromStringStr,
	asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr,
	adaptiveLoggerTypeFromString:   adaptiveLoggerTypeFromStringStr,
}
// getLoggerTypeFromString parses a string and returns the corresponding
// logger type; found is false when the name is unknown.
func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) {
	for candidate, name := range loggerTypeToStringRepresentations {
		if name == logTypeString {
			return candidate, true
		}
	}
	return 0, false
}
// logConfig stores logging configuration. Contains messages dispatcher, allowed log level rules
// (general constraints and exceptions)
type logConfig struct {
	Constraints    logLevelConstraints  // General log level rules (>min and <max, or set of allowed levels)
	Exceptions     []*LogLevelException // Exceptions to general rules for specific files or funcs
	RootDispatcher dispatcherInterface  // Root of output tree
}

// NewLoggerConfig assembles a logConfig from constraints, exceptions, and a
// root dispatcher.
func NewLoggerConfig(c logLevelConstraints, e []*LogLevelException, d dispatcherInterface) *logConfig {
	return &logConfig{c, e, d}
}
// configForParsing is used when parsing config from file: logger type is deduced from string, params
// need to be converted from attributes to values and passed to specific logger constructor. Also,
// custom registered receivers and other parse params are used in this case.
type configForParsing struct {
	logConfig
	LogType    loggerTypeFromString
	LoggerData interface{}     // logger-type-specific settings (see asyncTimerLoggerData / adaptiveLoggerData)
	Params     *CfgParseParams // Check cfg_parser: CfgParseParams
}
// newFullLoggerConfig builds a configForParsing, rejecting nil constraints
// or a nil root dispatcher.
func newFullLoggerConfig(
	constraints logLevelConstraints,
	exceptions []*LogLevelException,
	rootDispatcher dispatcherInterface,
	logType loggerTypeFromString,
	logData interface{},
	cfgParams *CfgParseParams) (*configForParsing, error) {
	if constraints == nil {
		return nil, errors.New("constraints can not be nil")
	}
	if rootDispatcher == nil {
		return nil, errors.New("rootDispatcher can not be nil")
	}
	return &configForParsing{
		logConfig: logConfig{
			Constraints:    constraints,
			Exceptions:     exceptions,
			RootDispatcher: rootDispatcher,
		},
		LogType:    logType,
		LoggerData: logData,
		Params:     cfgParams,
	}, nil
}
// IsAllowed reports whether logging at 'level' is permitted in 'context'.
// If any exception pattern matches the context, that exception's constraints
// apply; otherwise the general constraints are used.
func (config *logConfig) IsAllowed(level LogLevel, context LogContextInterface) bool {
	if context.IsValid() {
		for _, exception := range config.Exceptions {
			if exception.MatchesContext(context) {
				return exception.IsAllowed(level)
			}
		}
	}
	return config.Constraints.IsAllowed(level)
}

1269
vendor/github.com/cihub/seelog/cfg_parser.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

25
vendor/github.com/cihub/seelog/common_closer.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog

162
vendor/github.com/cihub/seelog/common_constraints.go generated vendored Normal file
View File

@ -0,0 +1,162 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"strings"
)
// Represents constraints which form a general rule for log levels selection
type logLevelConstraints interface {
	// IsAllowed reports whether a message at 'level' may be logged.
	IsAllowed(level LogLevel) bool
}

// A minMaxConstraints represents constraints which use minimal and maximal allowed log levels.
type minMaxConstraints struct {
	min LogLevel // lowest allowed level (inclusive)
	max LogLevel // highest allowed level (inclusive)
}
// NewMinMaxConstraints creates a new minMaxConstraints struct with the
// specified min and max levels; both must lie in [TraceLvl, CriticalLvl]
// and min must not exceed max.
func NewMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) {
	switch {
	case min > max:
		return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max)
	case min < TraceLvl || min > CriticalLvl:
		return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min)
	case max < TraceLvl || max > CriticalLvl:
		return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. Got max: %d", max)
	}
	return &minMaxConstraints{min: min, max: max}, nil
}
// IsAllowed returns true, if log level is in [min, max] range (inclusive).
func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool {
	return minMaxConstr.min <= level && level <= minMaxConstr.max
}

// String describes the range as "Min: X. Max: Y".
func (minMaxConstr *minMaxConstraints) String() string {
	return fmt.Sprintf("Min: %s. Max: %s", minMaxConstr.min, minMaxConstr.max)
}
//=======================================================

// A listConstraints represents constraints which use allowed log levels list.
type listConstraints struct {
	allowedLevels map[LogLevel]bool // set of allowed levels (createMapFromList stores only true values)
}
// NewListConstraints creates a new listConstraints struct with the specified
// allowed levels. The list must be non-nil, contain only valid levels, and
// Off may not be combined with other levels.
func NewListConstraints(allowList []LogLevel) (*listConstraints, error) {
	if allowList == nil {
		return nil, errors.New("list can't be nil")
	}
	levels, err := createMapFromList(allowList)
	if err != nil {
		return nil, err
	}
	if err = validateOffLevel(levels); err != nil {
		return nil, err
	}
	return &listConstraints{allowedLevels: levels}, nil
}
// String lists the allowed levels in ascending order, e.g. "List: Debug,Info".
func (listConstr *listConstraints) String() string {
	names := make([]string, 0, len(listConstr.allowedLevels))
	for lvl := TraceLvl; lvl <= Off; lvl++ {
		if listConstr.allowedLevels[lvl] {
			names = append(names, lvl.String())
		}
	}
	return "List: " + strings.Join(names, ",")
}
// createMapFromList converts a list of levels into a set, rejecting any level
// outside [TraceLvl, Off]. Duplicates collapse into a single entry.
// Fix: pre-size the map with len(allowedList) instead of the misleading
// make(map, 0) — avoids rehashing while building the set.
func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) {
	allowedLevels := make(map[LogLevel]bool, len(allowedList))
	for _, level := range allowedList {
		if level < TraceLvl || level > Off {
			return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. Got level: %d", level)
		}
		allowedLevels[level] = true
	}
	return allowedLevels, nil
}
// validateOffLevel rejects sets that mix Off with any other level.
func validateOffLevel(allowedLevels map[LogLevel]bool) error {
	_, hasOff := allowedLevels[Off]
	if hasOff && len(allowedLevels) > 1 {
		return errors.New("logLevel Off cant be mixed with other levels")
	}
	return nil
}
// IsAllowed returns true, if log level is in allowed log levels list.
// If the list contains the only item 'common.Off' then IsAllowed will always
// return false for any input values.
// Fix: replaces an accidental O(n) scan over the map's keys with an O(1)
// existence lookup; semantics are identical (the original compared keys only,
// ignoring values, and never allowed Off).
func (listConstr *listConstraints) IsAllowed(level LogLevel) bool {
	if level == Off {
		return false
	}
	_, present := listConstr.allowedLevels[level]
	return present
}
// AllowedLevels returns the allowed-levels configuration as a map.
// Note: this is the internal map, not a copy — callers must not mutate it.
func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool {
	return listConstr.allowedLevels
}
//=======================================================

// offConstraints disallows every log level ("off" mode).
type offConstraints struct {
}

// NewOffConstraints creates a constraint set that rejects all levels.
func NewOffConstraints() (*offConstraints, error) {
	return &offConstraints{}, nil
}

// IsAllowed always reports false: nothing is logged in off mode.
func (offConstr *offConstraints) IsAllowed(level LogLevel) bool {
	return false
}

// String describes the constraint for diagnostics.
func (offConstr *offConstraints) String() string {
	return "Off constraint"
}

194
vendor/github.com/cihub/seelog/common_context.go generated vendored Normal file
View File

@ -0,0 +1,194 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
// workingDir is the process working directory in slashed form with a trailing
// "/". It is used below to shorten file paths and trim function names in log
// contexts; it stays "/" when os.Getwd fails.
var workingDir = "/"

// init caches the working directory once at startup.
func init() {
	wd, err := os.Getwd()
	if err == nil {
		workingDir = filepath.ToSlash(wd) + "/"
	}
}
// LogContextInterface represents runtime caller context.
type LogContextInterface interface {
	// Func returns the caller's function name.
	Func() string
	// Line returns the caller's line number.
	Line() int
	// ShortPath returns the caller's file short path (in slashed form).
	ShortPath() string
	// FullPath returns the caller's file full path (in slashed form).
	FullPath() string
	// FileName returns the caller's file name (without path).
	FileName() string
	// IsValid reports true if the context is correct and may be used.
	// If false, then an error in context evaluation occurred and
	// all its other data may be corrupted.
	IsValid() bool
	// CallTime returns the time when the log function was called.
	CallTime() time.Time
	// CustomContext returns the custom context that can be set by calling logger.SetContext
	CustomContext() interface{}
}
// currentContext returns the context of the immediate caller
// (one stack frame above this call).
func currentContext(custom interface{}) (LogContextInterface, error) {
	return specifyContext(1, custom)
}
// extractCallerInfo resolves the file paths, function name, and line number
// of the stack frame 'skip' levels above this call. The short path and
// function name have the working-directory prefix stripped when present.
func extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, line int, err error) {
	pc, fp, ln, ok := runtime.Caller(skip)
	if !ok {
		return "", "", "", 0, fmt.Errorf("error during runtime.Caller")
	}
	fullPath = fp
	line = ln
	shortPath = strings.TrimPrefix(fp, workingDir)
	funcName = strings.TrimPrefix(runtime.FuncForPC(pc).Name(), workingDir)
	return fullPath, shortPath, funcName, line, nil
}
// specifyContext returns the context of the frame 'skip' levels above the
// caller (skip == 0 behaves like currentContext). A context is always
// returned, even on failure: in that case it is an errorContext whose
// accessors carry no paths or names but report that extraction failed.
func specifyContext(skip int, custom interface{}) (LogContextInterface, error) {
	callTime := time.Now()
	if skip < 0 {
		err := fmt.Errorf("can not skip negative stack frames")
		return &errorContext{callTime, err}, err
	}
	fullPath, shortPath, funcName, line, err := extractCallerInfo(skip + 2)
	if err != nil {
		return &errorContext{callTime, err}, err
	}
	_, fileName := filepath.Split(fullPath)
	return &logContext{funcName, line, shortPath, fullPath, fileName, callTime, custom}, nil
}
// Represents a normal runtime caller context.
type logContext struct {
	funcName  string      // caller function name
	line      int         // caller line number
	shortPath string      // file path with workingDir prefix removed (slashed)
	fullPath  string      // full file path (slashed)
	fileName  string      // file name without directory
	callTime  time.Time   // time the log call was made
	custom    interface{} // user-supplied custom context value
}
// IsValid always reports true: logContext is only constructed on success.
func (lc *logContext) IsValid() bool { return true }

// Func returns the caller's function name.
func (lc *logContext) Func() string { return lc.funcName }

// Line returns the caller's line number.
func (lc *logContext) Line() int { return lc.line }

// ShortPath returns the caller's file path relative to the working directory.
func (lc *logContext) ShortPath() string { return lc.shortPath }

// FullPath returns the caller's full file path.
func (lc *logContext) FullPath() string { return lc.fullPath }

// FileName returns the caller's file name without the directory.
func (lc *logContext) FileName() string { return lc.fileName }

// CallTime returns the time the log call was made.
func (lc *logContext) CallTime() time.Time { return lc.callTime }

// CustomContext returns the user-supplied custom context value.
func (lc *logContext) CustomContext() interface{} { return lc.custom }
// Represents an error context: returned when caller information could not be
// extracted. Every path/name accessor reports the underlying error.
type errorContext struct {
	errorTime time.Time // time of the failed capture attempt
	err       error     // reason the context could not be built
}
// getErrorText formats the stored error for the accessor named 'prefix'.
func (ec *errorContext) getErrorText(prefix string) string {
	return fmt.Sprintf("%s() error: %s", prefix, ec.err)
}

// IsValid always reports false for an error context.
func (ec *errorContext) IsValid() bool { return false }

// Line returns -1: no line information is available.
func (ec *errorContext) Line() int { return -1 }

func (ec *errorContext) Func() string { return ec.getErrorText("Func") }

func (ec *errorContext) ShortPath() string { return ec.getErrorText("ShortPath") }

func (ec *errorContext) FullPath() string { return ec.getErrorText("FullPath") }

func (ec *errorContext) FileName() string { return ec.getErrorText("FileName") }

// CallTime returns the time the failed capture was attempted.
func (ec *errorContext) CallTime() time.Time { return ec.errorTime }

// CustomContext always returns nil for an error context.
func (ec *errorContext) CustomContext() interface{} { return nil }

194
vendor/github.com/cihub/seelog/common_exception.go generated vendored Normal file
View File

@ -0,0 +1,194 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"regexp"
"strings"
)
// Used in rules creation to validate input file and func filters.
// NOTE(review): the patterns are unanchored and can match the empty string;
// callers rely on FindString(s) == s, which rejects any disallowed rune
// because FindString then returns only a proper substring.
var (
	fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`)
	funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`)
)
// LogLevelException represents an exceptional case used when you need some specific files or funcs to
// override general constraints and to use their own.
type LogLevelException struct {
	// funcPatternParts/filePatternParts are the patterns pre-split by
	// splitPattern into literal runs and "*" wildcards.
	funcPatternParts []string
	filePatternParts []string
	// funcPattern/filePattern are the normalized (re-joined) pattern strings.
	funcPattern string
	filePattern string
	// constraints decide which log levels this exception permits.
	constraints logLevelConstraints
}
// NewLogLevelException creates a new exception from a func pattern, a file
// pattern, and a non-nil set of level constraints. Both patterns are
// validated and pre-split for fast matching.
func NewLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*LogLevelException, error) {
	if constraints == nil {
		return nil, errors.New("constraints can not be nil")
	}

	ex := new(LogLevelException)
	if err := ex.initFuncPatternParts(funcPattern); err != nil {
		return nil, err
	}
	ex.funcPattern = strings.Join(ex.funcPatternParts, "")

	if err := ex.initFilePatternParts(filePattern); err != nil {
		return nil, err
	}
	ex.filePattern = strings.Join(ex.filePatternParts, "")

	ex.constraints = constraints
	return ex, nil
}
// MatchesContext returns true if context matches the patterns of this LogLevelException
func (logLevelEx *LogLevelException) MatchesContext(context LogContextInterface) bool {
	return logLevelEx.match(context.Func(), context.FullPath())
}

// IsAllowed returns true if log level is allowed according to the constraints of this LogLevelException
func (logLevelEx *LogLevelException) IsAllowed(level LogLevel) bool {
	return logLevelEx.constraints.IsAllowed(level)
}

// FuncPattern returns the function pattern of an exception
func (logLevelEx *LogLevelException) FuncPattern() string {
	return logLevelEx.funcPattern
}

// FilePattern returns the file pattern of an exception
func (logLevelEx *LogLevelException) FilePattern() string {
	return logLevelEx.filePattern
}
// initFuncPatternParts checks whether the func filter has a correct format
// and splits funcPattern on parts.
func (logLevelEx *LogLevelException) initFuncPatternParts(funcPattern string) (err error) {
	// The validator can only match allowed runes, so a shorter match means
	// an invalid character was present somewhere in the pattern.
	if funcFormatValidator.FindString(funcPattern) != funcPattern {
		return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . allowed)")
	}
	logLevelEx.funcPatternParts = splitPattern(funcPattern)
	return nil
}

// initFilePatternParts checks whether the file filter has a correct format
// and splits filePattern on parts using splitPattern.
func (logLevelEx *LogLevelException) initFilePatternParts(filePattern string) (err error) {
	if fileFormatValidator.FindString(filePattern) != filePattern {
		return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * . allowed)")
	}
	logLevelEx.filePatternParts = splitPattern(filePattern)
	// Fixed: was "return err", which always returned nil here anyway;
	// the explicit nil matches initFuncPatternParts.
	return nil
}
// match reports whether funcPath and filePath both satisfy the exception's
// pre-split patterns (short-circuits on the func pattern first).
func (logLevelEx *LogLevelException) match(funcPath string, filePath string) bool {
	return stringMatchesPattern(logLevelEx.funcPatternParts, funcPath) &&
		stringMatchesPattern(logLevelEx.filePatternParts, filePath)
}
// String returns a human-readable description of the exception: both
// patterns plus the constraints (or "nil" when absent).
func (logLevelEx *LogLevelException) String() string {
	str := fmt.Sprintf("Func: %s File: %s", logLevelEx.funcPattern, logLevelEx.filePattern)
	if logLevelEx.constraints == nil {
		return str + "nil"
	}
	return str + fmt.Sprintf("Constr: %s", logLevelEx.constraints)
}
// splitPattern splits pattern into alternating literal runs and single
// asterisks; consecutive asterisks collapse into one.
// Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"].
func splitPattern(pattern string) []string {
	var parts []string
	var prev rune // zero value never equals '*', so the first rune is handled correctly
	for _, r := range pattern {
		switch {
		case r == '*':
			// Collapse runs of '*' into a single wildcard part.
			if prev != '*' {
				parts = append(parts, "*")
			}
		case len(parts) > 0 && prev != '*':
			// Extend the current literal run.
			parts[len(parts)-1] += string(r)
		default:
			// Start a new literal run (first rune, or right after a '*').
			parts = append(parts, string(r))
		}
		prev = r
	}
	return parts
}
// stringMatchesPattern reports whether testString matches the pre-split
// pattern (literal runs and "*" wildcards) produced by splitPattern.
// Standard regexp functionality is not used here because of performance issues.
func stringMatchesPattern(patternparts []string, testString string) bool {
	// No pattern left: only the empty string matches.
	if len(patternparts) == 0 {
		return len(testString) == 0
	}
	head := patternparts[0]
	if head != "*" {
		// A literal part must match at the very start of the string.
		if !strings.HasPrefix(testString, head) {
			return false
		}
		return stringMatchesPattern(patternparts[1:], testString[len(head):])
	}
	// A trailing "*" matches any remainder.
	if len(patternparts) == 1 {
		return true
	}
	// "*" followed by a literal: try each occurrence of the literal and
	// recurse on the remainder after it.
	next := patternparts[1]
	rest := testString
	for {
		idx := strings.Index(rest, next)
		if idx < 0 {
			return false
		}
		rest = rest[idx+len(next):]
		if stringMatchesPattern(patternparts[2:], rest) {
			return true
		}
	}
}

31
vendor/github.com/cihub/seelog/common_flusher.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// flusherInterface represents all objects that have to do cleanup
// at certain moments of time (e.g. before app shutdown to avoid data loss).
type flusherInterface interface {
	// Flush synchronously writes out any buffered data.
	Flush()
}

81
vendor/github.com/cihub/seelog/common_loglevel.go generated vendored Normal file
View File

@ -0,0 +1,81 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
// LogLevel is the log level type.
type LogLevel uint8

// Log levels, ordered from least to most severe; Off disables logging.
// NOTE(review): deliberately left untyped (not "LogLevel = iota") so that
// existing callers using them as plain ints keep compiling.
const (
	TraceLvl = iota
	DebugLvl
	InfoLvl
	WarnLvl
	ErrorLvl
	CriticalLvl
	Off
)

// Log level string representations (used in configuration files).
const (
	TraceStr    = "trace"
	DebugStr    = "debug"
	InfoStr     = "info"
	WarnStr     = "warn"
	ErrorStr    = "error"
	CriticalStr = "critical"
	OffStr      = "off"
)

// levelToStringRepresentations maps each level to its config-file name.
var levelToStringRepresentations = map[LogLevel]string{
	TraceLvl:    TraceStr,
	DebugLvl:    DebugStr,
	InfoLvl:     InfoStr,
	WarnLvl:     WarnStr,
	ErrorLvl:    ErrorStr,
	CriticalLvl: CriticalStr,
	Off:         OffStr,
}

// LogLevelFromString parses a string and returns the corresponding log
// level, if successful. found is false for unknown strings.
func LogLevelFromString(levelStr string) (level LogLevel, found bool) {
	for candidate, repr := range levelToStringRepresentations {
		if repr == levelStr {
			return candidate, true
		}
	}
	return 0, false
}

// String returns the seelog string representation for a specified level.
// Returns "" for invalid log levels.
func (level LogLevel) String() string {
	if repr, ok := levelToStringRepresentations[level]; ok {
		return repr
	}
	return ""
}

242
vendor/github.com/cihub/seelog/dispatch_custom.go generated vendored Normal file
View File

@ -0,0 +1,242 @@
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"reflect"
"sort"
)
// registeredReceivers maps a <custom> config name to the concrete (non-pointer)
// type that should be instantiated for it.
var registeredReceivers = make(map[string]reflect.Type)

// RegisterReceiver records a custom receiver type, identified by a value
// of that type (second argument), under the specified name. Registered
// names can be used in the "name" attribute of <custom> config items.
//
// RegisterReceiver takes the type of the receiver argument, without taking
// the value into the account. So do NOT enter any data to the second argument
// and only call it like:
// RegisterReceiver("somename", &MyReceiverType{})
//
// After that, when a '<custom>' config tag with this name is used,
// a receiver of the specified type would be instantiated. Check
// CustomReceiver comments for interface details.
//
// NOTE 1: RegisterReceiver fails if you attempt to register different types
// with the same name.
//
// NOTE 2: RegisterReceiver registers those receivers that must be used in
// the configuration files (<custom> items). Basically it is just the way
// you tell seelog config parser what should it do when it meets a
// <custom> tag with a specific name and data attributes.
//
// But If you are only using seelog as a proxy to an already instantiated
// CustomReceiver (via LoggerFromCustomReceiver func), you should not call RegisterReceiver.
func RegisterReceiver(name string, receiver CustomReceiver) {
	// Record only the dynamic element type; the passed-in value is discarded.
	newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface())
	// Re-registering the same type under the same name is a no-op;
	// a different type under an existing name is a programmer error.
	if t, ok := registeredReceivers[name]; ok && t != newType {
		panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType))
	}
	registeredReceivers[name] = newType
}
// customReceiverByName instantiates a fresh receiver of the type previously
// registered under name via RegisterReceiver.
func customReceiverByName(name string) (creceiver CustomReceiver, err error) {
	receiverType, found := registeredReceivers[name]
	if !found {
		return nil, fmt.Errorf("custom receiver name not registered: '%s'", name)
	}
	receiver, ok := reflect.New(receiverType).Interface().(CustomReceiver)
	if !ok {
		// The registered type's pointer does not satisfy CustomReceiver.
		return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name)
	}
	return receiver, nil
}
// CustomReceiverInitArgs represent arguments passed to the CustomReceiver.Init
// func when custom receiver is being initialized.
type CustomReceiverInitArgs struct {
	// XmlCustomAttrs represent '<custom>' xml config item attributes that
	// start with "data-". Map keys will be the attribute names without the "data-".
	// Map values will the those attribute values.
	//
	// E.g. if you have a '<custom name="somename" data-attr1="a1" data-attr2="a2"/>'
	// you will get map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2"
	//
	// Note that in custom items you can only use allowed attributes, like "name" and
	// your custom attributes, starting with "data-". Any other will lead to a
	// parsing error.
	XmlCustomAttrs map[string]string
}

// CustomReceiver is the interface that external custom seelog message receivers
// must implement in order to be able to process seelog messages. Those receivers
// are set in the xml config file using the <custom> tag. Check receivers reference
// wiki section on that.
//
// Use seelog.RegisterReceiver on the receiver type before using it.
type CustomReceiver interface {
	// ReceiveMessage is called when the custom receiver gets seelog message from
	// a parent dispatcher.
	//
	// Message, level and context args represent all data that was included in the seelog
	// message at the time it was logged.
	//
	// The formatting is already applied to the message and depends on the config
	// like with any other receiver.
	//
	// If you would like to inform seelog of an error that happened during the handling of
	// the message, return a non-nil error. This way you'll end up seeing your error like
	// any other internal seelog error.
	ReceiveMessage(message string, level LogLevel, context LogContextInterface) error

	// AfterParse is called immediately after your custom receiver is instantiated by
	// the xml config parser. So, if you need to do any startup logic after config parsing,
	// like opening file or allocating any resources after the receiver is instantiated, do it here.
	//
	// If this func returns a non-nil error, then the loading procedure will fail. E.g.
	// if you are loading a seelog xml config, the parser would not finish the loading
	// procedure and inform about an error like with any other config error.
	//
	// If your custom logger needs some configuration, you can use custom attributes in
	// your config. Check CustomReceiverInitArgs.XmlCustomAttrs comments.
	//
	// IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used
	// to create seelog proxy logger using the custom receiver. This func is only called when
	// receiver is instantiated from a config.
	AfterParse(initArgs CustomReceiverInitArgs) error

	// Flush is called when the custom receiver gets a 'flush' directive from a
	// parent receiver. If custom receiver implements some kind of buffering or
	// queuing, then the appropriate reaction on a flush message is synchronous
	// flushing of all those queues/buffers. If custom receiver doesn't have
	// such mechanisms, then flush implementation may be left empty.
	Flush()

	// Close is called when the custom receiver gets a 'close' directive from a
	// parent receiver. This happens when a top-level seelog dispatcher is sending
	// 'close' to all child nodes and it means that current seelog logger is being closed.
	// If you need to do any cleanup after your custom receiver is done, you should do
	// it here.
	Close() error
}
// customReceiverDispatcher formats messages and forwards them to a single
// user-provided CustomReceiver.
type customReceiverDispatcher struct {
	formatter          *formatter
	innerReceiver      CustomReceiver
	customReceiverName string // the <custom> config name this receiver was created from
	usedArgs           CustomReceiverInitArgs
}
// NewCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created
// using a <custom> tag in the config file.
func NewCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
	switch {
	case formatter == nil:
		return nil, errors.New("formatter cannot be nil")
	case len(customReceiverName) == 0:
		return nil, errors.New("custom receiver name cannot be empty")
	}

	receiver, err := customReceiverByName(customReceiverName)
	if err != nil {
		return nil, err
	}
	// Config-driven receivers get their post-parse hook before first use.
	if err = receiver.AfterParse(cArgs); err != nil {
		return nil, err
	}
	return &customReceiverDispatcher{formatter, receiver, customReceiverName, cArgs}, nil
}
// NewCustomReceiverDispatcherByValue is basically the same as NewCustomReceiverDispatcher, but using
// a specific CustomReceiver value instead of instantiating a new one by type.
// Note that AfterParse is deliberately NOT invoked here.
func NewCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) {
	if formatter == nil {
		return nil, errors.New("formatter cannot be nil")
	}
	if customReceiver == nil {
		return nil, errors.New("customReceiver cannot be nil")
	}
	return &customReceiverDispatcher{
		formatter:          formatter,
		innerReceiver:      customReceiver,
		customReceiverName: name,
		usedArgs:           cArgs,
	}, nil
}
// Dispatch formats the message and hands it to the inner receiver.
// A panic inside the receiver is recovered and reported via errorFunc so a
// misbehaving custom receiver cannot crash the logger.
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Dispatch(
	message string,
	level LogLevel,
	context LogContextInterface,
	errorFunc func(err error)) {

	defer func() {
		if r := recover(); r != nil {
			errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), r))
		}
	}()

	formatted := disp.formatter.Format(message, level, context)
	if err := disp.innerReceiver.ReceiveMessage(formatted, level, context); err != nil {
		errorFunc(err)
	}
}
// Flush forwards the flush directive to the inner receiver.
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Flush() {
	disp.innerReceiver.Flush()
}
// Close flushes any buffered data and then closes the inner receiver.
// CustomReceiver implementation. Check CustomReceiver comments.
func (disp *customReceiverDispatcher) Close() error {
	disp.innerReceiver.Flush()
	// Simplified: the previous "if err != nil { return err }; return nil"
	// is equivalent to returning Close()'s error directly.
	return disp.innerReceiver.Close()
}
// String returns a human-readable description of the dispatcher, including
// its config name, formatter, sorted data attributes, and inner receiver.
func (disp *customReceiverDispatcher) String() string {
	attrKeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs))
	for key := range disp.usedArgs.XmlCustomAttrs {
		attrKeys = append(attrKeys, key)
	}
	// Sort so the output is deterministic regardless of map iteration order.
	sort.Strings(attrKeys)

	datas := ""
	for _, key := range attrKeys {
		datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key])
	}

	return fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n",
		disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver)
}

189
vendor/github.com/cihub/seelog/dispatch_dispatcher.go generated vendored Normal file
View File

@ -0,0 +1,189 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"io"
)
// A dispatcherInterface is used to dispatch message to all underlying receivers.
// Dispatch logic depends on given context and log level. Any errors are reported using errorFunc.
// Also, as underlying receivers may have a state, dispatcher has a ShuttingDown method which performs
// an immediate cleanup of all data that is stored in the receivers
type dispatcherInterface interface {
	flusherInterface
	io.Closer
	Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error))
}

// dispatcher fans messages out to a set of formatted writers and child
// dispatchers, formatting them with its own formatter.
type dispatcher struct {
	formatter   *formatter
	writers     []*formattedWriter
	dispatchers []dispatcherInterface
}
// createDispatcher creates a dispatcher which dispatches data to a list of receivers.
// Each receiver should be a *formattedWriter, an io.Writer, or a
// dispatcherInterface; otherwise an error is returned.
func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) {
	if formatter == nil {
		return nil, errors.New("formatter cannot be nil")
	}
	// len() of a nil slice is 0, so this covers both nil and empty
	// (staticcheck S1009: the explicit nil check was redundant).
	if len(receivers) == 0 {
		return nil, errors.New("receivers cannot be nil or empty")
	}
	disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)}
	for _, receiver := range receivers {
		// Case order mirrors the original assertion order: an existing
		// *formattedWriter is kept as-is, any other io.Writer is wrapped,
		// and only then dispatchers are accepted.
		switch rec := receiver.(type) {
		case *formattedWriter:
			disp.writers = append(disp.writers, rec)
		case io.Writer:
			writer, err := NewFormattedWriter(rec, disp.formatter)
			if err != nil {
				return nil, err
			}
			disp.writers = append(disp.writers, writer)
		case dispatcherInterface:
			disp.dispatchers = append(disp.dispatchers, rec)
		default:
			return nil, errors.New("method can receive either io.Writer or dispatcherInterface")
		}
	}
	return disp, nil
}
// Dispatch sends the message to every writer (reporting write errors via
// errorFunc) and then recursively to every child dispatcher.
func (disp *dispatcher) Dispatch(
	message string,
	level LogLevel,
	context LogContextInterface,
	errorFunc func(err error)) {

	for _, w := range disp.writers {
		if err := w.Write(message, level, context); err != nil {
			errorFunc(err)
		}
	}
	for _, child := range disp.dispatchers {
		child.Dispatch(message, level, context, errorFunc)
	}
}
// Flush goes through all underlying writers which implement flusherInterface
// and flushes them. Recursively performs the same action for underlying dispatchers.
func (disp *dispatcher) Flush() {
	for _, child := range disp.Dispatchers() {
		child.Flush()
	}
	for _, fw := range disp.Writers() {
		if flusher, ok := fw.Writer().(flusherInterface); ok {
			flusher.Flush()
		}
	}
}
// Close goes through all underlying writers which implement io.Closer
// and closes them. Recursively performs the same action for underlying dispatchers.
// Before closing, writers are flushed to prevent loss of any buffered data, so
// a call to Flush() func before Close() is not necessary.
// Returns on the first error encountered.
func (disp *dispatcher) Close() error {
	for _, child := range disp.Dispatchers() {
		child.Flush()
		if err := child.Close(); err != nil {
			return err
		}
	}
	for _, fw := range disp.Writers() {
		if flusher, ok := fw.Writer().(flusherInterface); ok {
			flusher.Flush()
		}
		if closer, ok := fw.Writer().(io.Closer); ok {
			if err := closer.Close(); err != nil {
				return err
			}
		}
	}
	return nil
}
// Writers returns the formatted writers this dispatcher writes to directly.
func (disp *dispatcher) Writers() []*formattedWriter {
	return disp.writers
}

// Dispatchers returns the child dispatchers messages are forwarded to.
func (disp *dispatcher) Dispatchers() []dispatcherInterface {
	return disp.dispatchers
}
// String returns a human-readable tree description of the dispatcher: its
// formatter, child dispatchers, and writers (debugging aid).
// NOTE(review): the literal spacing in these labels may have been altered by
// the diff viewer; confirm against the upstream file before reformatting.
func (disp *dispatcher) String() string {
	str := "formatter: " + disp.formatter.String() + "\n"
	str += " ->Dispatchers:"
	if len(disp.dispatchers) == 0 {
		str += "none\n"
	} else {
		str += "\n"
		for _, disp := range disp.dispatchers {
			str += fmt.Sprintf(" ->%s", disp)
		}
	}
	str += " ->Writers:"
	if len(disp.writers) == 0 {
		str += "none\n"
	} else {
		str += "\n"
		for _, writer := range disp.writers {
			str += fmt.Sprintf(" ->%s\n", writer)
		}
	}
	return str
}

View File

@ -0,0 +1,66 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// A filterDispatcher writes the given message to underlying receivers only if message log level
// is in the allowed list.
type filterDispatcher struct {
	*dispatcher
	// allowList holds the levels this filter lets through; levels absent
	// from the map are dropped.
	allowList map[LogLevel]bool
}
// NewFilterDispatcher creates a new filterDispatcher using a list of allowed levels.
func NewFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) {
	base, err := createDispatcher(formatter, receivers)
	if err != nil {
		return nil, err
	}
	allowed := make(map[LogLevel]bool, len(allowList))
	for _, lvl := range allowList {
		allowed[lvl] = true
	}
	return &filterDispatcher{base, allowed}, nil
}
// Dispatch forwards the message to the underlying dispatcher only when the
// level is present in the allow list; other levels are silently dropped.
func (filter *filterDispatcher) Dispatch(
	message string,
	level LogLevel,
	context LogContextInterface,
	errorFunc func(err error)) {

	// A missing key yields the zero value false, so this is equivalent to
	// the explicit "value, ok" lookup.
	if filter.allowList[level] {
		filter.dispatcher.Dispatch(message, level, context, errorFunc)
	}
}
// String returns a description of the filter and its underlying dispatcher
// tree (debugging aid).
func (filter *filterDispatcher) String() string {
	return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher)
}

View File

@ -0,0 +1,47 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
)
// A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.)
type splitDispatcher struct {
	*dispatcher
}
// NewSplitDispatcher creates a splitDispatcher that forwards every message
// unconditionally to all of the given receivers.
func NewSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) {
	disp, err := createDispatcher(formatter, receivers)
	if err != nil {
		return nil, err
	}
	return &splitDispatcher{disp}, nil
}
// String returns a description of the splitter and its underlying dispatcher
// tree (debugging aid).
func (splitter *splitDispatcher) String() string {
	return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String())
}

175
vendor/github.com/cihub/seelog/doc.go generated vendored Normal file
View File

@ -0,0 +1,175 @@
// Copyright (c) 2014 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package seelog implements logging functionality with flexible dispatching, filtering, and formatting.
Creation
To create a logger, use one of the following constructors:
func LoggerFromConfigAsBytes
func LoggerFromConfigAsFile
func LoggerFromConfigAsString
func LoggerFromWriterWithMinLevel
func LoggerFromWriterWithMinLevelAndFormat
func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers)
Example:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
defer logger.Flush()
... use logger ...
}
The "defer" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some
messages when you close your application because they are processed in another non-blocking goroutine. To avoid that you
explicitly defer flushing all messages before closing.
Usage
Logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs.
Example:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
defer logger.Flush()
logger.Trace("test")
logger.Debugf("var = %s", "abc")
}
Having loggers as variables is convenient if you are writing your own package with internal logging or if you have
several loggers with different options.
But for most standalone apps it is more convenient to use package level funcs and vars. There is a package level
var 'Current' made for it. You can replace it with another logger using 'ReplaceLogger' and then use package level funcs:
import log "github.com/cihub/seelog"
func main() {
logger, err := log.LoggerFromConfigAsFile("seelog.xml")
if err != nil {
panic(err)
}
log.ReplaceLogger(logger)
defer log.Flush()
log.Trace("test")
log.Debugf("var = %s", "abc")
}
Last lines
log.Trace("test")
log.Debugf("var = %s", "abc")
do the same as
log.Current.Trace("test")
log.Current.Debugf("var = %s", "abc")
In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to 'logger' variable created from config.
This way you are able to use package level funcs instead of passing the logger variable.
Configuration
Main seelog point is to configure logger via config files and not the code.
The configuration is read by LoggerFrom* funcs. These funcs read xml configuration from different sources and try
to create a logger using it.
All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki.
There are many sections covering different aspects of seelog, but the most important for understanding configs are:
https://github.com/cihub/seelog/wiki/Constraints-and-exceptions
https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers
https://github.com/cihub/seelog/wiki/Formatting
https://github.com/cihub/seelog/wiki/Logger-types
After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date
list of dispatchers, receivers, formats, and logger types.
Here is an example config with all these features:
<seelog type="adaptive" mininterval="2000000" maxinterval="100000000" critmsgcount="500" minlevel="debug">
<exceptions>
<exception filepattern="test*" minlevel="error"/>
</exceptions>
<outputs formatid="all">
<file path="all.log"/>
<filter levels="info">
<console formatid="fmtinfo"/>
</filter>
<filter levels="error,critical" formatid="fmterror">
<console/>
<file path="errors.log"/>
</filter>
</outputs>
<formats>
<format id="fmtinfo" format="[%Level] [%Time] %Msg%n"/>
<format id="fmterror" format="[%LEVEL] [%Time] [%FuncShort @ %File.%Line] %Msg%n"/>
<format id="all" format="[%Level] [%Time] [@ %File.%Line] %Msg%n"/>
<format id="criticalemail" format="Critical error on our server!\n %Time %Date %RelFile %Func %Msg \nSent by Seelog"/>
</formats>
</seelog>
This config represents a logger with adaptive timeout between log messages (check logger types reference) which
logs to console, all.log, and errors.log depending on the log level. Its output formats also depend on log level. This logger will only
use log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test'
this logger prohibits all levels below 'error'.
Configuration using code
Although configuration using code is not recommended, it is sometimes needed and it is possible to do with seelog. Basically, what
you need to do to get started is to create constraints, exceptions and a dispatcher tree (same as with config). Most of the New*
functions in this package are used to provide such capabilities.
Here is an example of configuration in code, that demonstrates an async loop logger that logs to a simple split dispatcher with
a console receiver using a specified format and is filtered using a top-level min-max constraints and one exception for
the 'main.go' file. So, this is basically a demonstration of configuration of most of the features:
package main
import log "github.com/cihub/seelog"
func main() {
defer log.Flush()
log.Info("Hello from Seelog!")
consoleWriter, _ := log.NewConsoleWriter()
formatter, _ := log.NewFormatter("%Level %Msg %File%n")
root, _ := log.NewSplitDispatcher(formatter, []interface{}{consoleWriter})
constraints, _ := log.NewMinMaxConstraints(log.TraceLvl, log.CriticalLvl)
specificConstraints, _ := log.NewListConstraints([]log.LogLevel{log.InfoLvl, log.ErrorLvl})
ex, _ := log.NewLogLevelException("*", "*main.go", specificConstraints)
exceptions := []*log.LogLevelException{ex}
logger := log.NewAsyncLoopLogger(log.NewLoggerConfig(constraints, exceptions, root))
log.ReplaceLogger(logger)
log.Trace("This should not be seen")
log.Debug("This should not be seen")
log.Info("Test")
log.Error("Test2")
}
Examples
To learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples
It contains many example configs and usecases.
*/
package seelog

466
vendor/github.com/cihub/seelog/format.go generated vendored Normal file
View File

@ -0,0 +1,466 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// FormatterSymbol is a special symbol used in config files to mark special format aliases.
const (
	FormatterSymbol = '%'
)

// Delimiters of the optional parameter attached to a parameterized verb,
// e.g. %Date(2006-01-02).
const (
	formatterParameterStart = '('
	formatterParameterEnd   = ')'
)

// Time and date formats used for %Date and %Time aliases.
const (
	DateDefaultFormat = "2006-01-02"
	TimeFormat        = "15:04:05"
)

// DefaultMsgFormat is the format used when a config does not specify one.
var DefaultMsgFormat = "%Ns [%Level] %Msg%n"

var (
	// DefaultFormatter renders DefaultMsgFormat; created in init below.
	DefaultFormatter *formatter
	// msgonlyformatter renders only the raw message text.
	msgonlyformatter *formatter
)

func init() {
	var err error
	// Failures here are reported, not fatal: the package stays importable.
	if DefaultFormatter, err = NewFormatter(DefaultMsgFormat); err != nil {
		reportInternalError(fmt.Errorf("error during creating DefaultFormatter: %s", err))
	}
	if msgonlyformatter, err = NewFormatter("%Msg"); err != nil {
		reportInternalError(fmt.Errorf("error during creating msgonlyformatter: %s", err))
	}
}

// FormatterFunc represents one formatter object that starts with '%' sign in the 'format' attribute
// of the 'format' config item. These special symbols are replaced with context values or special
// strings when message is written to byte receiver.
//
// Check https://github.com/cihub/seelog/wiki/Formatting for details.
// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference
//
// FormatterFunc takes raw log message, level, log context and returns a string, number (of any type) or any object
// that can be evaluated as string.
type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{}

// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized
// formatters (such as %Date or %EscM) and custom user formatters.
type FormatterFuncCreator func(param string) FormatterFunc

// formatterFuncs maps simple (parameterless) verb names to their producers.
var formatterFuncs = map[string]FormatterFunc{
	"Level":     formatterLevel,
	"Lev":       formatterLev,
	"LEVEL":     formatterLEVEL,
	"LEV":       formatterLEV,
	"l":         formatterl,
	"Msg":       formatterMsg,
	"FullPath":  formatterFullPath,
	"File":      formatterFile,
	"RelFile":   formatterRelFile,
	"Func":      FormatterFunction,
	"FuncShort": FormatterFunctionShort,
	"Line":      formatterLine,
	"Time":      formatterTime,
	"UTCTime":   formatterUTCTime,
	"Ns":        formatterNs,
	"UTCNs":     formatterUTCNs,
	"r":         formatterr,
	"n":         formattern,
	"t":         formattert,
}

// formatterFuncsParameterized maps parameterized verb names to their factories.
// RegisterCustomFormatter adds user entries to this map.
var formatterFuncsParameterized = map[string]FormatterFuncCreator{
	"Date":    createDateTimeFormatterFunc,
	"UTCDate": createUTCDateTimeFormatterFunc,
	"EscM":    createANSIEscapeFunc,
}
// errorAliasReserved reports that 'name' collides with an already-registered
// formatter alias and therefore cannot be used for a custom formatter.
func errorAliasReserved(name string) error {
	const template = "cannot use '%s' as custom formatter name. Name is reserved"
	return fmt.Errorf(template, name)
}
// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil,
// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and
// it will be treated like the standard parameterized formatter identifiers.
//
// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. The general recommendation
// is to call it once in 'init' func of your application or any initializer func.
//
// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters.
//
// Name must only consist of letters (unicode.IsLetter).
//
// Name must not be one of the already registered standard formatter names
// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered
// custom format names. To avoid any potential name conflicts (in future releases), it is recommended
// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword.
func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error {
	// Reject names that clash with either formatter table.
	_, isSimple := formatterFuncs[name]
	_, isParameterized := formatterFuncsParameterized[name]
	if isSimple || isParameterized {
		return errorAliasReserved(name)
	}
	formatterFuncsParameterized[name] = creator
	return nil
}
// formatter is used to write messages in a specific format, inserting such additional data
// as log level, date/time, etc.
type formatter struct {
	fmtStringOriginal string          // raw user format string, e.g. "%Time [%Level] %Msg%n"
	fmtString         string          // fmtStringOriginal with every verb replaced by "%v"
	formatterFuncs    []FormatterFunc // one value producer per "%v", in order of appearance
}

// NewFormatter creates a new formatter using a format string.
// Returns an error if the format string contains an unknown or malformed verb.
func NewFormatter(formatString string) (*formatter, error) {
	fmtr := new(formatter)
	fmtr.fmtStringOriginal = formatString
	// Pre-parse the format string into fmtString + formatterFuncs.
	if err := buildFormatterFuncs(fmtr); err != nil {
		return nil, err
	}
	return fmtr, nil
}
// buildFormatterFuncs parses formatter.fmtStringOriginal, filling in
// formatter.fmtString (verbs replaced with "%v") and formatter.formatterFuncs
// (their value producers, in order). Returns an error on a malformed string.
func buildFormatterFuncs(formatter *formatter) error {
	var (
		fsbuf  = new(bytes.Buffer)
		fsolm1 = len(formatter.fmtStringOriginal) - 1 // index of the last byte
	)
	for i := 0; i <= fsolm1; i++ {
		if char := formatter.fmtStringOriginal[i]; char != FormatterSymbol {
			fsbuf.WriteByte(char)
			continue
		}
		// Check if the index is at the end of the string.
		if i == fsolm1 {
			return fmt.Errorf("format error: %c cannot be last symbol", FormatterSymbol)
		}
		// "%%" escapes a literal '%': emit one '%' and skip the second.
		if formatter.fmtStringOriginal[i+1] == FormatterSymbol {
			fsbuf.WriteRune(FormatterSymbol)
			i++
			continue
		}
		function, ni, err := formatter.extractFormatterFunc(i + 1)
		if err != nil {
			return err
		}
		// Append formatting string "%v" (bytes 37, 118 are '%' and 'v').
		fsbuf.Write([]byte{37, 118})
		// Jump past the verb (and its optional parameter).
		i = ni
		formatter.formatterFuncs = append(formatter.formatterFuncs, function)
	}
	formatter.fmtString = fsbuf.String()
	return nil
}
// extractFormatterFunc resolves the verb that starts at byte offset 'index'
// (just past a '%'). It tries the simple verb table first, then the
// parameterized/custom one. Returns the producer and the index of the verb's
// last consumed byte.
func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) {
	letterSequence := formatter.extractLetterSequence(index)
	if len(letterSequence) == 0 {
		return nil, 0, fmt.Errorf("format error: lack of formatter after %c at %d", FormatterSymbol, index)
	}
	function, formatterLength, ok := formatter.findFormatterFunc(letterSequence)
	if ok {
		return function, index + formatterLength - 1, nil
	}
	// Not a simple verb; try parameterized formatters, e.g. %Date(...).
	function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index)
	if err != nil {
		return nil, 0, err
	}
	if ok {
		return function, index + formatterLength - 1, nil
	}
	return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence)
}
// extractLetterSequence returns the run of Unicode letters that starts at byte
// offset 'index' in the original format string (possibly empty).
func (formatter *formatter) extractLetterSequence(index int) string {
	var collected string
	remaining := []byte(formatter.fmtStringOriginal[index:])
	for count := utf8.RuneCount(remaining); count > 0; count-- {
		r, size := utf8.DecodeRune(remaining)
		if !unicode.IsLetter(r) {
			break
		}
		collected += string(r)
		remaining = remaining[size:]
	}
	return collected
}
// findFormatterFunc matches the longest prefix of 'letters' that is a known
// simple verb. Returns the producer, the matched length, and success.
func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) {
	for verb := letters; len(verb) > 0; verb = verb[:len(verb)-1] {
		if fn, exists := formatterFuncs[verb]; exists {
			return fn, len(verb), true
		}
	}
	return nil, 0, false
}
// findFormatterFuncParametrized matches the longest prefix of 'letters' against
// the parameterized formatter table, then — only when the whole letter run is
// the verb — looks for a trailing "(param)" right after it. Returns the created
// producer and the consumed length (verb plus parameter with parentheses).
func (formatter *formatter) findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) {
	currentVerb := letters
	for i := 0; i < len(letters); i++ {
		functionCreator, ok := formatterFuncsParameterized[currentVerb]
		if ok {
			parameter := ""
			parameterLen := 0
			isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless
			if isVerbEqualsLetters {
				userParameter := ""
				var err error
				userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb))
				if ok {
					parameter = userParameter
				} else if err != nil {
					return nil, 0, false, err
				}
			}
			return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil
		}
		// No match: drop the last letter and retry with a shorter candidate.
		currentVerb = currentVerb[:len(currentVerb)-1]
	}
	return nil, 0, false, nil
}
// findparameter extracts a parenthesized parameter that must begin exactly at
// startIndex, e.g. "(2006-01-02)". Returns the text without parentheses and the
// total consumed length including them. ok == false with a nil error simply
// means "no parameter present".
func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) {
	if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart {
		return "", 0, false, nil
	}
	endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd))
	if endIndex == -1 {
		return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s",
			startIndex, formatter.fmtStringOriginal[startIndex:])
	}
	// Convert back to an absolute index in the original string.
	endIndex += startIndex
	length := endIndex - startIndex + 1
	return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil
}
// Format processes a message with special formatters, log level, and context.
// Returns a formatted string with every verb replaced by its produced value.
func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string {
	count := len(formatter.formatterFuncs)
	if count == 0 {
		// No verbs: the pre-built string is the final output.
		return formatter.fmtString
	}
	args := make([]interface{}, 0, count)
	for _, produce := range formatter.formatterFuncs {
		args = append(args, produce(message, level, context))
	}
	return fmt.Sprintf(formatter.fmtString, args...)
}

// String returns the original, unparsed format string.
func (formatter *formatter) String() string {
	return formatter.fmtStringOriginal
}
//=====================================================

// Fallback strings emitted when a formatter receives a value it cannot map.
const (
	wrongLogLevel   = "WRONG_LOGLEVEL"
	wrongEscapeCode = "WRONG_ESCAPE"
)

// levelToString maps levels to their full display names (%Level).
var levelToString = map[LogLevel]string{
	TraceLvl:    "Trace",
	DebugLvl:    "Debug",
	InfoLvl:     "Info",
	WarnLvl:     "Warn",
	ErrorLvl:    "Error",
	CriticalLvl: "Critical",
	Off:         "Off",
}

// levelToShortString maps levels to 3-letter abbreviations (%Lev).
var levelToShortString = map[LogLevel]string{
	TraceLvl:    "Trc",
	DebugLvl:    "Dbg",
	InfoLvl:     "Inf",
	WarnLvl:     "Wrn",
	ErrorLvl:    "Err",
	CriticalLvl: "Crt",
	Off:         "Off",
}

// levelToShortestString maps levels to single-letter abbreviations (%l).
var levelToShortestString = map[LogLevel]string{
	TraceLvl:    "t",
	DebugLvl:    "d",
	InfoLvl:     "i",
	WarnLvl:     "w",
	ErrorLvl:    "e",
	CriticalLvl: "c",
	Off:         "o",
}
// formatterLevel produces the full level name, e.g. "Info" (%Level).
func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} {
	if name, known := levelToString[level]; known {
		return name
	}
	return wrongLogLevel
}

// formatterLev produces the 3-letter level name, e.g. "Inf" (%Lev).
func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} {
	if name, known := levelToShortString[level]; known {
		return name
	}
	return wrongLogLevel
}

// formatterLEVEL produces the upper-case full level name (%LEVEL).
func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} {
	name := formatterLevel(message, level, context).(string)
	return strings.ToTitle(name)
}

// formatterLEV produces the upper-case 3-letter level name (%LEV).
func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} {
	name := formatterLev(message, level, context).(string)
	return strings.ToTitle(name)
}

// formatterl produces the single-letter level name, e.g. "i" (%l).
func formatterl(message string, level LogLevel, context LogContextInterface) interface{} {
	if name, known := levelToShortestString[level]; known {
		return name
	}
	return wrongLogLevel
}
// formatterMsg produces the raw log message text (%Msg).
func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} {
	return message
}

// formatterFullPath produces the caller's full source file path (%FullPath).
func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.FullPath()
}

// formatterFile produces the caller's source file name (%File).
func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.FileName()
}

// formatterRelFile produces the caller's shortened source path (%RelFile).
func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.ShortPath()
}

// FormatterFunction produces the caller's function name (%Func).
func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.Func()
}

// FormatterFunctionShort produces only the last dot-separated component of the
// caller's function name (%FuncShort).
func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} {
	f := context.Func()
	spl := strings.Split(f, ".")
	return spl[len(spl)-1]
}

// formatterLine produces the caller's source line number (%Line).
func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.Line()
}

// formatterTime produces the call time formatted with TimeFormat (%Time).
func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().Format(TimeFormat)
}

// formatterUTCTime produces the UTC call time formatted with TimeFormat (%UTCTime).
func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().UTC().Format(TimeFormat)
}

// formatterNs produces the call time in nanoseconds since the Unix epoch (%Ns).
func formatterNs(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().UnixNano()
}

// formatterUTCNs produces the UTC call time in nanoseconds since the Unix epoch (%UTCNs).
func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().UTC().UnixNano()
}

// formatterr produces a carriage return (%r).
func formatterr(message string, level LogLevel, context LogContextInterface) interface{} {
	return "\r"
}

// formattern produces a line feed (%n).
func formattern(message string, level LogLevel, context LogContextInterface) interface{} {
	return "\n"
}

// formattert produces a tab character (%t).
func formattert(message string, level LogLevel, context LogContextInterface) interface{} {
	return "\t"
}
// createDateTimeFormatterFunc builds a %Date producer. An empty parameter
// falls back to DateDefaultFormat.
func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
	layout := DateDefaultFormat
	if dateTimeFormat != "" {
		layout = dateTimeFormat
	}
	return func(message string, level LogLevel, context LogContextInterface) interface{} {
		return context.CallTime().Format(layout)
	}
}

// createUTCDateTimeFormatterFunc builds a %UTCDate producer. An empty
// parameter falls back to DateDefaultFormat.
func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
	layout := DateDefaultFormat
	if dateTimeFormat != "" {
		layout = dateTimeFormat
	}
	return func(message string, level LogLevel, context LogContextInterface) interface{} {
		return context.CallTime().UTC().Format(layout)
	}
}

// createANSIEscapeFunc builds a %EscM producer emitting an ANSI escape
// sequence (ESC '[' <code> 'm'); an empty code yields wrongEscapeCode.
func createANSIEscapeFunc(escapeCodeString string) FormatterFunc {
	return func(message string, level LogLevel, context LogContextInterface) interface{} {
		if escapeCodeString == "" {
			return wrongEscapeCode
		}
		return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString)
	}
}

10
vendor/github.com/cihub/seelog/internals_baseerror.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
package seelog
// baseError is the shared foundation for seelog's custom error types.
type baseError struct {
	message string // human-readable description
}

// Error implements the error interface.
func (e baseError) Error() string {
	return e.message
}

320
vendor/github.com/cihub/seelog/internals_fsutils.go generated vendored Normal file
View File

@ -0,0 +1,320 @@
package seelog
import (
"fmt"
"io"
"os"
"path/filepath"
"sync"
)
// File and directory permissions.
const (
	defaultFilePermissions      = 0666
	defaultDirectoryPermissions = 0767
)

const (
	// Max number of directories can be read asynchronously.
	maxDirNumberReadAsync = 1000
)

// cannotOpenFileError indicates a file or directory could not be opened.
type cannotOpenFileError struct {
	baseError
}

func newCannotOpenFileError(fname string) *cannotOpenFileError {
	return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}}
}

// notDirectoryError indicates a path expected to be a directory is not one.
type notDirectoryError struct {
	baseError
}

func newNotDirectoryError(dname string) *notDirectoryError {
	return &notDirectoryError{baseError{message: dname + " is not directory"}}
}

// fileFilter is a filtering criteria function for '*os.File'.
// Must return 'false' to set aside the given file.
type fileFilter func(os.FileInfo, *os.File) bool

// filePathFilter is a filtering criteria function for file path.
// Must return 'false' to set aside the given file.
type filePathFilter func(filePath string) bool
// getSubdirNames returns the names (not paths) of the immediate
// subdirectories of dirPath. Errors if dirPath does not exist or is not a
// directory.
func getSubdirNames(dirPath string) ([]string, error) {
	fi, err := os.Stat(dirPath)
	if err != nil {
		return nil, err
	}
	if !fi.IsDir() {
		return nil, newNotDirectoryError(dirPath)
	}
	dd, err := os.Open(dirPath)
	// Cannot open file.
	if err != nil {
		if dd != nil {
			dd.Close()
		}
		return nil, err
	}
	defer dd.Close()
	// TODO: Improve performance by buffering reading.
	allEntities, err := dd.Readdir(-1)
	if err != nil {
		return nil, err
	}
	subDirs := []string{}
	for _, entity := range allEntities {
		if entity.IsDir() {
			subDirs = append(subDirs, entity.Name())
		}
	}
	return subDirs, nil
}
// getAllSubdirAbsPaths recursively visits all the subdirectories
// starting from the given directory and returns absolute paths for them.
// On failure it returns an empty (non-nil) slice together with the error.
func getAllSubdirAbsPaths(dirPath string) (res []string, err error) {
	// Direct children first; err here is the named return value.
	dps, err := getSubdirAbsPaths(dirPath)
	if err != nil {
		res = []string{}
		return
	}
	res = append(res, dps...)
	// Then recurse into each child.
	for _, dp := range dps {
		sdps, err := getAllSubdirAbsPaths(dp)
		if err != nil {
			return []string{}, err
		}
		res = append(res, sdps...)
	}
	return
}
// getSubdirAbsPaths supplies absolute paths for all subdirectories in a given
// directory. On error the returned slice is nil.
func getSubdirAbsPaths(dirPath string) ([]string, error) {
	names, err := getSubdirNames(dirPath)
	if err != nil {
		return nil, err
	}
	paths := make([]string, 0, len(names))
	for _, name := range names {
		paths = append(paths, filepath.Join(dirPath, name))
	}
	return paths, nil
}
// getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory.
// Remark: Ignores files for which fFilter returns false.
// A nil entry is appended for any file that could not be opened.
// NOTE(review): a file that is opened but then rejected by fFilter is never
// closed here — possible fd leak; preserved as-is (vendored code).
func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) {
	dfi, err := os.Open(dirPath)
	if err != nil {
		return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
	}
	defer dfi.Close()
	// Size of read buffer (i.e. chunk of items read at a time).
	rbs := 64
	resFiles := []*os.File{}
L:
	for {
		// Read directory entities by reasonable chunks
		// to prevent overflows on big number of files.
		fis, e := dfi.Readdir(rbs)
		switch e {
		// It's OK.
		case nil:
			// Do nothing, just continue cycle.
		case io.EOF:
			break L
		// Something went wrong.
		default:
			return nil, e
		}
		// THINK: Maybe, use async running.
		for _, fi := range fis {
			// NB: On Linux this could be a problem as
			// there are lots of file types available.
			if !fi.IsDir() {
				f, e := os.Open(filepath.Join(dirPath, fi.Name()))
				if e != nil {
					if f != nil {
						f.Close()
					}
					// THINK: Add nil as indicator that a problem occurred.
					resFiles = append(resFiles, nil)
					continue
				}
				// Check filter condition.
				if fFilter != nil && !fFilter(fi, f) {
					continue
				}
				resFiles = append(resFiles, f)
			}
		}
	}
	return resFiles, nil
}
// isRegular reports whether the mode describes a regular file
// (no type bits set).
func isRegular(m os.FileMode) bool {
	return m.IsRegular()
}
// getDirFilePaths return full paths of the files located in the directory.
// Remark: Ignores files for which fpFilter returns false.
// When pathIsName is true, bare file names are returned instead of full paths.
func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) {
	dfi, err := os.Open(dirPath)
	if err != nil {
		return nil, newCannotOpenFileError("Cannot open directory " + dirPath)
	}
	defer dfi.Close()
	// Resolve dirPath to an absolute path once so joined results are absolute.
	var absDirPath string
	if !filepath.IsAbs(dirPath) {
		absDirPath, err = filepath.Abs(dirPath)
		if err != nil {
			return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error())
		}
	} else {
		absDirPath = dirPath
	}
	// TODO: check if dirPath is really directory.
	// Size of read buffer (i.e. chunk of items read at a time).
	rbs := 2 << 5
	filePaths := []string{}
	var fp string
L:
	for {
		// Read directory entities by reasonable chunks
		// to prevent overflows on big number of files.
		fis, e := dfi.Readdir(rbs)
		switch e {
		// It's OK.
		case nil:
			// Do nothing, just continue cycle.
		case io.EOF:
			break L
		// Indicate that something went wrong.
		default:
			return nil, e
		}
		// THINK: Maybe, use async running.
		for _, fi := range fis {
			// NB: Should work on every Windows and non-Windows OS.
			if isRegular(fi.Mode()) {
				if pathIsName {
					fp = fi.Name()
				} else {
					// Build full path of a file.
					fp = filepath.Join(absDirPath, fi.Name())
				}
				// Check filter condition.
				if fpFilter != nil && !fpFilter(fp) {
					continue
				}
				filePaths = append(filePaths, fp)
			}
		}
	}
	return filePaths, nil
}
// getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs
// in map 'filesInDirMap': Key - directory name, value - *os.File slice.
// Returns the first error found in the results; note that files already opened
// for other directories are not closed in that case.
func getOpenFilesByDirectoryAsync(
	dirPaths []string,
	fFilter fileFilter,
	filesInDirMap map[string][]*os.File,
) error {
	n := len(dirPaths)
	if n > maxDirNumberReadAsync {
		return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync)
	}
	// Result of reading one directory.
	type filesInDirResult struct {
		DirName string
		Files   []*os.File
		Error   error
	}
	// Buffered to n so every goroutine can send without blocking.
	dirFilesChan := make(chan *filesInDirResult, n)
	var wg sync.WaitGroup
	// Register n goroutines which are going to do work.
	wg.Add(n)
	for i := 0; i < n; i++ {
		// Launch asynchronously the piece of work.
		go func(dirPath string) {
			fs, e := getOpenFilesInDir(dirPath, fFilter)
			dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e}
			// Mark the current goroutine as finished (work is done).
			wg.Done()
		}(dirPaths[i])
	}
	// Wait for all goroutines to finish their work.
	wg.Wait()
	// Close the error channel to let for-range clause
	// get all the buffered values without blocking and quit in the end.
	close(dirFilesChan)
	for fidr := range dirFilesChan {
		if fidr.Error == nil {
			// THINK: What will happen if the key is already present?
			filesInDirMap[fidr.DirName] = fidr.Files
		} else {
			return fidr.Error
		}
	}
	return nil
}
// fileExists reports whether a file exists at 'path'. A "not exist" stat
// failure maps to (false, nil); any other stat failure is returned as an error.
func fileExists(path string) (bool, error) {
	switch _, err := os.Stat(path); {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// createDirectory makes directory with a given name
// making all parent directories if necessary.
// NOTE(review): MkdirAll is called with os.ModeDir, whose permission bits are
// all zero, so newly created directories may end up with very restrictive
// permissions on POSIX systems — confirm before relying on this (vendored
// behavior preserved as-is).
func createDirectory(dirPath string) error {
	var dPath string
	var err error
	// Normalize to an absolute path.
	if !filepath.IsAbs(dirPath) {
		dPath, err = filepath.Abs(dirPath)
		if err != nil {
			return err
		}
	} else {
		dPath = dirPath
	}
	exists, err := fileExists(dPath)
	if err != nil {
		return err
	}
	if exists {
		// Already present: nothing to do.
		return nil
	}
	return os.MkdirAll(dPath, os.ModeDir)
}
// tryRemoveFile removes the file at filePath, treating "does not exist" as
// success; any other removal failure is returned.
func tryRemoveFile(filePath string) error {
	if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

175
vendor/github.com/cihub/seelog/internals_xmlnode.go generated vendored Normal file
View File

@ -0,0 +1,175 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"encoding/xml"
"errors"
"fmt"
"io"
"strings"
)
// xmlNode is a mutable tree node produced by parsing a seelog XML config.
type xmlNode struct {
	name       string            // element's local name
	attributes map[string]string // attribute name -> value
	children   []*xmlNode        // child elements in document order
	value      string            // concatenated trimmed character data
}

// newNode returns an empty node with non-nil children and attributes.
func newNode() *xmlNode {
	node := new(xmlNode)
	node.children = make([]*xmlNode, 0)
	node.attributes = make(map[string]string)
	return node
}

// String renders the node and its subtree as XML text.
// NOTE: attribute order follows Go map iteration order and is thus random.
// Uses a strings.Builder instead of the previous repeated string
// concatenation, which was quadratic in the output size.
func (node *xmlNode) String() string {
	var b strings.Builder
	fmt.Fprintf(&b, "<%s", node.name)
	for attrName, attrVal := range node.attributes {
		fmt.Fprintf(&b, " %s=\"%s\"", attrName, attrVal)
	}
	b.WriteByte('>')
	b.WriteString(node.value)
	for _, child := range node.children {
		b.WriteString(child.String())
	}
	fmt.Fprintf(&b, "</%s>", node.name)
	return b.String()
}
// unmarshal populates the node's name and attributes from an XML start tag,
// rejecting duplicated attribute names.
func (node *xmlNode) unmarshal(startEl xml.StartElement) error {
	node.name = startEl.Name.Local
	for _, attr := range startEl.Attr {
		if _, duplicated := node.attributes[attr.Name.Local]; duplicated {
			return errors.New("tag '" + node.name + "' has duplicated attribute: '" + attr.Name.Local + "'")
		}
		node.attributes[attr.Name.Local] = attr.Value
	}
	return nil
}
// add appends child to the node's child list.
func (node *xmlNode) add(child *xmlNode) {
	// append handles a nil slice, so no explicit initialization is required.
	node.children = append(node.children, child)
}

// hasChildren reports whether the node has at least one child element.
func (node *xmlNode) hasChildren() bool {
	return len(node.children) > 0
}
//=============================================
// unmarshalConfig parses a whole config document from reader and returns its
// single root element. Errors if the document is empty, malformed, or has more
// than one root element.
func unmarshalConfig(reader io.Reader) (*xmlNode, error) {
	xmlParser := xml.NewDecoder(reader)
	config, err := unmarshalNode(xmlParser, nil)
	if err != nil {
		return nil, err
	}
	if config == nil {
		return nil, errors.New("xml has no content")
	}
	nextConfigEntry, err := unmarshalNode(xmlParser, nil)
	// BUG FIX: this error was previously ignored, so a syntax error in
	// trailing content after the root element was silently swallowed.
	if err != nil {
		return nil, err
	}
	if nextConfigEntry != nil {
		return nil, errors.New("xml contains more than one root element")
	}
	return config, nil
}
// unmarshalNode reads tokens from xmlParser and builds one element subtree.
// If curToken is non-nil it is treated as the already-consumed first token
// (used by the recursive call for a child's StartElement). Returns (nil, nil)
// when the input is exhausted before any element starts.
func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) {
	firstLoop := true
	for {
		var tok xml.Token
		if firstLoop && curToken != nil {
			// Reuse the token the caller already pulled from the stream.
			tok = curToken
			firstLoop = false
		} else {
			tok, err = getNextToken(xmlParser)
			if err != nil || tok == nil {
				// Error or clean EOF: return whatever has been built so far.
				return
			}
		}
		switch tt := tok.(type) {
		case xml.SyntaxError:
			err = errors.New(tt.Error())
			return
		case xml.CharData:
			// Accumulate trimmed character data into the current node's value.
			value := strings.TrimSpace(string([]byte(tt)))
			if node != nil {
				node.value += value
			}
		case xml.StartElement:
			if node == nil {
				// This is the subtree root; populate it from the start tag.
				node = newNode()
				err := node.unmarshal(tt)
				if err != nil {
					return nil, err
				}
			} else {
				// Nested element: parse it recursively as a child subtree.
				childNode, childErr := unmarshalNode(xmlParser, tok)
				if childErr != nil {
					return nil, childErr
				}
				if childNode != nil {
					node.add(childNode)
				} else {
					return
				}
			}
		case xml.EndElement:
			// Close tag of the current element: subtree complete.
			return
		}
	}
}
// getNextToken returns the next XML token from the parser, translating io.EOF
// into a (nil, nil) result so callers can treat end-of-input as a normal stop
// condition rather than an error.
func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) {
	if tok, err = xmlParser.Token(); err != nil {
		if err == io.EOF {
			err = nil
			return
		}
		return
	}
	return
}

307
vendor/github.com/cihub/seelog/log.go generated vendored Normal file
View File

@ -0,0 +1,307 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"sync"
"time"
)
const (
	staticFuncCallDepth = 3 // See 'commonLogger.log' method comments
	loggerFuncCallDepth = 3 // Stack depth when called via the LoggerInterface method wrappers
)

// Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc.
var Current LoggerInterface

// Default logger that is created from an empty config: "<seelog/>". It is not closed by a ReplaceLogger call.
var Default LoggerInterface

// Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call.
var Disabled LoggerInterface

// pkgOperationsMutex serializes every package-level operation on Current.
var pkgOperationsMutex *sync.Mutex
// init prepares the package-level loggers. BUG FIX: the original reused one
// 'err' variable for both constructions, so a failure creating Default was
// silently overwritten by a successful Disabled construction and never
// reached the final error check. Each construction is now checked on its own.
func init() {
	pkgOperationsMutex = new(sync.Mutex)
	if Default == nil {
		logger, err := LoggerFromConfigAsBytes([]byte("<seelog />"))
		if err != nil {
			panic(fmt.Sprintf("Seelog couldn't start. Error: %s", err.Error()))
		}
		Default = logger
	}
	if Disabled == nil {
		logger, err := LoggerFromConfigAsBytes([]byte("<seelog levels=\"off\"/>"))
		if err != nil {
			panic(fmt.Sprintf("Seelog couldn't start. Error: %s", err.Error()))
		}
		Disabled = logger
	}
	Current = Default
}
// createLoggerFromFullConfig instantiates the concrete logger implementation
// selected by the parsed configuration's LogType, validating any
// type-specific LoggerData it requires.
func createLoggerFromFullConfig(config *configForParsing) (LoggerInterface, error) {
	switch config.LogType {
	case syncloggerTypeFromString:
		return NewSyncLogger(&config.logConfig), nil
	case asyncLooploggerTypeFromString:
		return NewAsyncLoopLogger(&config.logConfig), nil
	case asyncTimerloggerTypeFromString:
		logData := config.LoggerData
		if logData == nil {
			return nil, errors.New("async timer data not set")
		}
		timerData, ok := logData.(asyncTimerLoggerData)
		if !ok {
			return nil, errors.New("invalid async timer data")
		}
		logger, err := NewAsyncTimerLogger(&config.logConfig, time.Duration(timerData.AsyncInterval))
		// BUG FIX: the original tested the stale 'ok' flag here instead of
		// 'err', so construction failures from NewAsyncTimerLogger were
		// silently ignored and a broken logger could be returned.
		if err != nil {
			return nil, err
		}
		return logger, nil
	case adaptiveLoggerTypeFromString:
		logData := config.LoggerData
		if logData == nil {
			return nil, errors.New("adaptive logger parameters not set")
		}
		adaptData, ok := logData.(adaptiveLoggerData)
		if !ok {
			return nil, errors.New("invalid adaptive logger parameters")
		}
		logger, err := NewAsyncAdaptiveLogger(
			&config.logConfig,
			time.Duration(adaptData.MinInterval),
			time.Duration(adaptData.MaxInterval),
			adaptData.CriticalMsgCount,
		)
		if err != nil {
			return nil, err
		}
		return logger, nil
	}
	return nil, errors.New("invalid config log type/data")
}
// UseLogger makes 'logger' the package-level 'Current' logger used by the
// Trace/Debug/... convenience functions.
//
// For example, after calling seelog.UseLogger(somelogger), a call to
// seelog.Debug("abc") behaves exactly like somelogger.Debug("abc").
//
// IMPORTANT: the previously active logger is flushed but NOT closed, so
// constantly swapping loggers without closing them elsewhere leaks memory.
// Use ReplaceLogger for a swap that also disposes of the old logger.
func UseLogger(logger LoggerInterface) error {
	if logger == nil {
		return errors.New("logger can not be nil")
	}
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	previous := Current
	Current = logger
	if previous != nil {
		previous.Flush()
	}
	return nil
}
// ReplaceLogger acts like UseLogger but also disposes of the logger that was
// previously active (the shared Default and Disabled loggers are never
// closed, only flushed where appropriate).
//
// Example:
//
//	import log "github.com/cihub/seelog"
//
//	func main() {
//		logger, err := log.LoggerFromConfigAsFile("seelog.xml")
//		if err != nil {
//			panic(err)
//		}
//		log.ReplaceLogger(logger)
//		defer log.Flush()
//
//		log.Trace("test")
//		log.Debugf("var = %s", "abc")
//	}
func ReplaceLogger(logger LoggerInterface) error {
	if logger == nil {
		return errors.New("logger can not be nil")
	}
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	// A panicking Flush/Close in the old logger must not take the host
	// application down; contain it and report to stderr.
	defer func() {
		if r := recover(); r != nil {
			reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", r))
		}
	}()
	switch {
	case Current == Default:
		// Default is shared package state: flush it but never close it.
		Current.Flush()
	case Current != nil && !Current.Closed() && Current != Disabled:
		Current.Flush()
		Current.Close()
	}
	Current = logger
	return nil
}
// Tracef formats message according to format specifier
// and writes to default logger with log level = Trace.
func Tracef(format string, params ...interface{}) {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}

// Debugf formats message according to format specifier
// and writes to default logger with log level = Debug.
func Debugf(format string, params ...interface{}) {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}

// Infof formats message according to format specifier
// and writes to default logger with log level = Info.
func Infof(format string, params ...interface{}) {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params))
}

// Warnf formats message according to format specifier and writes to default logger with log level = Warn.
// The returned error carries the rendered message so callers can log and propagate in one step.
func Warnf(format string, params ...interface{}) error {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	message := newLogFormattedMessage(format, params)
	Current.warnWithCallDepth(staticFuncCallDepth, message)
	return errors.New(message.String())
}

// Errorf formats message according to format specifier and writes to default logger with log level = Error.
// The returned error carries the rendered message so callers can log and propagate in one step.
func Errorf(format string, params ...interface{}) error {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	message := newLogFormattedMessage(format, params)
	Current.errorWithCallDepth(staticFuncCallDepth, message)
	return errors.New(message.String())
}

// Criticalf formats message according to format specifier and writes to default logger with log level = Critical.
// The returned error carries the rendered message so callers can log and propagate in one step.
func Criticalf(format string, params ...interface{}) error {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	message := newLogFormattedMessage(format, params)
	Current.criticalWithCallDepth(staticFuncCallDepth, message)
	return errors.New(message.String())
}
// Trace formats message using the default formats for its operands and writes to default logger with log level = Trace
func Trace(v ...interface{}) {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}

// Debug formats message using the default formats for its operands and writes to default logger with log level = Debug
func Debug(v ...interface{}) {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}

// Info formats message using the default formats for its operands and writes to default logger with log level = Info
func Info(v ...interface{}) {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v))
}

// Warn formats message using the default formats for its operands and writes to default logger with log level = Warn.
// The returned error carries the rendered message so callers can log and propagate in one step.
func Warn(v ...interface{}) error {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	message := newLogMessage(v)
	Current.warnWithCallDepth(staticFuncCallDepth, message)
	return errors.New(message.String())
}

// Error formats message using the default formats for its operands and writes to default logger with log level = Error.
// The returned error carries the rendered message so callers can log and propagate in one step.
func Error(v ...interface{}) error {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	message := newLogMessage(v)
	Current.errorWithCallDepth(staticFuncCallDepth, message)
	return errors.New(message.String())
}

// Critical formats message using the default formats for its operands and writes to default logger with log level = Critical.
// The returned error carries the rendered message so callers can log and propagate in one step.
func Critical(v ...interface{}) error {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	message := newLogMessage(v)
	Current.criticalWithCallDepth(staticFuncCallDepth, message)
	return errors.New(message.String())
}

// Flush immediately processes all currently queued messages and all currently buffered messages.
// It is a blocking call which returns only after the queue is empty and all the buffers are empty.
//
// If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '<buffered>' receivers)
// , because there is no queue.
//
// Call this method when your app is going to shut down not to lose any log messages.
func Flush() {
	pkgOperationsMutex.Lock()
	defer pkgOperationsMutex.Unlock()
	Current.Flush()
}

370
vendor/github.com/cihub/seelog/logger.go generated vendored Normal file
View File

@ -0,0 +1,370 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"os"
"sync"
)
// reportInternalError writes seelog's own failures straight to stderr,
// since the logging pipeline itself cannot be trusted at that point.
func reportInternalError(err error) {
	msg := fmt.Sprintf("seelog internal error: %s\n", err)
	os.Stderr.WriteString(msg)
}
// LoggerInterface represents structs capable of logging Seelog messages
type LoggerInterface interface {

	// Tracef formats message according to format specifier
	// and writes to log with level = Trace.
	Tracef(format string, params ...interface{})

	// Debugf formats message according to format specifier
	// and writes to log with level = Debug.
	Debugf(format string, params ...interface{})

	// Infof formats message according to format specifier
	// and writes to log with level = Info.
	Infof(format string, params ...interface{})

	// Warnf formats message according to format specifier
	// and writes to log with level = Warn.
	Warnf(format string, params ...interface{}) error

	// Errorf formats message according to format specifier
	// and writes to log with level = Error.
	Errorf(format string, params ...interface{}) error

	// Criticalf formats message according to format specifier
	// and writes to log with level = Critical.
	Criticalf(format string, params ...interface{}) error

	// Trace formats message using the default formats for its operands
	// and writes to log with level = Trace
	Trace(v ...interface{})

	// Debug formats message using the default formats for its operands
	// and writes to log with level = Debug
	Debug(v ...interface{})

	// Info formats message using the default formats for its operands
	// and writes to log with level = Info
	Info(v ...interface{})

	// Warn formats message using the default formats for its operands
	// and writes to log with level = Warn
	Warn(v ...interface{}) error

	// Error formats message using the default formats for its operands
	// and writes to log with level = Error
	Error(v ...interface{}) error

	// Critical formats message using the default formats for its operands
	// and writes to log with level = Critical
	Critical(v ...interface{}) error

	// Unexported entry points used by the package-level convenience funcs;
	// callDepth keeps runtime.Caller file/line detection pointing at user code.
	traceWithCallDepth(callDepth int, message fmt.Stringer)
	debugWithCallDepth(callDepth int, message fmt.Stringer)
	infoWithCallDepth(callDepth int, message fmt.Stringer)
	warnWithCallDepth(callDepth int, message fmt.Stringer)
	errorWithCallDepth(callDepth int, message fmt.Stringer)
	criticalWithCallDepth(callDepth int, message fmt.Stringer)

	// Close flushes all the messages in the logger and closes it. It cannot be used after this operation.
	Close()

	// Flush flushes all the messages in the logger.
	Flush()

	// Closed returns true if the logger was previously closed.
	Closed() bool

	// SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller
	// when getting function information needed to print seelog format identifiers such as %Func or %File.
	//
	// This func may be used when you wrap seelog funcs and want to print caller info of you own
	// wrappers instead of seelog func callers. In this case you should set depth = 1. If you then
	// wrap your wrapper, you should set depth = 2, etc.
	//
	// NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect
	// function/file names in log files. Do not use it if you are not going to wrap seelog funcs.
	// You may reset the value to default using a SetAdditionalStackDepth(0) call.
	SetAdditionalStackDepth(depth int) error

	// Sets logger context that can be used in formatter funcs and custom receivers
	SetContext(context interface{})
}
// innerLoggerInterface is an internal logging interface
type innerLoggerInterface interface {
	// innerLog dispatches a single message that has already passed the
	// level/constraint checks in commonLogger.log.
	innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer)
	Flush()
}

// [file path][func name][level] -> [allowed]
type allowedContextCache map[string]map[string]map[LogLevel]bool

// commonLogger contains all common data needed for logging and contains methods used to log messages.
type commonLogger struct {
	config        *logConfig           // Config used for logging
	contextCache  allowedContextCache  // Caches whether log is enabled for specific "full path-func name-level" sets
	closed        bool                 // 'true' when all writers are closed, all data is flushed, logger is unusable. Must be accessed while holding closedM
	closedM       sync.RWMutex         // Guards 'closed'
	m             sync.Mutex           // Mutex for main operations
	unusedLevels  []bool               // unusedLevels[level] is true when no configured constraint ever allows that level
	innerLogger   innerLoggerInterface // Concrete sync/async dispatch implementation
	addStackDepth int                  // Additional stack depth needed for correct seelog caller context detection
	customContext interface{}          // User-supplied context, see SetContext
}
// newCommonLogger builds the shared logger core: it stores the config,
// prepares the per-context allow cache, and precomputes which levels can
// never be emitted so the hot path can skip them without locking.
func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger {
	logger := &commonLogger{
		config:       config,
		contextCache: make(allowedContextCache),
		unusedLevels: make([]bool, Off),
		innerLogger:  internalLogger,
	}
	logger.fillUnusedLevels()
	return logger
}
// SetAdditionalStackDepth sets how many extra stack frames runtime.Caller
// should skip when resolving %Func/%File identifiers. Use this when
// wrapping seelog so caller info points at your wrapper's callers.
// Depth must be non-negative; 0 restores the default behavior.
func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error {
	if depth < 0 {
		return fmt.Errorf("negative depth: %d", depth)
	}
	cLogger.m.Lock()
	defer cLogger.m.Unlock()
	cLogger.addStackDepth = depth
	return nil
}
// The methods below implement the public LoggerInterface logging API. Each
// forwards to its *WithCallDepth internal entry point with
// loggerFuncCallDepth so caller file/line detection stays correct; the
// Warn/Error/Critical variants also return the rendered message as an error.
func (cLogger *commonLogger) Tracef(format string, params ...interface{}) {
	cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}

func (cLogger *commonLogger) Debugf(format string, params ...interface{}) {
	cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}

func (cLogger *commonLogger) Infof(format string, params ...interface{}) {
	cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params))
}

func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error {
	message := newLogFormattedMessage(format, params)
	cLogger.warnWithCallDepth(loggerFuncCallDepth, message)
	return errors.New(message.String())
}

func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error {
	message := newLogFormattedMessage(format, params)
	cLogger.errorWithCallDepth(loggerFuncCallDepth, message)
	return errors.New(message.String())
}

func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error {
	message := newLogFormattedMessage(format, params)
	cLogger.criticalWithCallDepth(loggerFuncCallDepth, message)
	return errors.New(message.String())
}

func (cLogger *commonLogger) Trace(v ...interface{}) {
	cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}

func (cLogger *commonLogger) Debug(v ...interface{}) {
	cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}

func (cLogger *commonLogger) Info(v ...interface{}) {
	cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v))
}

func (cLogger *commonLogger) Warn(v ...interface{}) error {
	message := newLogMessage(v)
	cLogger.warnWithCallDepth(loggerFuncCallDepth, message)
	return errors.New(message.String())
}

func (cLogger *commonLogger) Error(v ...interface{}) error {
	message := newLogMessage(v)
	cLogger.errorWithCallDepth(loggerFuncCallDepth, message)
	return errors.New(message.String())
}

func (cLogger *commonLogger) Critical(v ...interface{}) error {
	message := newLogMessage(v)
	cLogger.criticalWithCallDepth(loggerFuncCallDepth, message)
	return errors.New(message.String())
}

// SetContext stores a user-supplied value made available to formatter
// funcs and custom receivers.
func (cLogger *commonLogger) SetContext(c interface{}) {
	cLogger.customContext = c
}
// The *WithCallDepth methods are the shared internal entry points: callDepth
// tells the context extractor how many stack frames to skip so that caller
// file/line information points at user code, whether the call came through
// the package-level funcs or the LoggerInterface methods.
func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) {
	cLogger.log(TraceLvl, message, callDepth)
}

func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) {
	cLogger.log(DebugLvl, message, callDepth)
}

func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) {
	cLogger.log(InfoLvl, message, callDepth)
}

func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) {
	cLogger.log(WarnLvl, message, callDepth)
}

func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) {
	cLogger.log(ErrorLvl, message, callDepth)
}

func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) {
	cLogger.log(CriticalLvl, message, callDepth)
	// Critical messages are flushed immediately so they are not lost if the
	// application terminates right after reporting them.
	cLogger.innerLogger.Flush()
}

// Closed returns true if the logger was previously closed.
func (cLogger *commonLogger) Closed() bool {
	cLogger.closedM.RLock()
	defer cLogger.closedM.RUnlock()
	return cLogger.closed
}
// fillUnusedLevels precomputes which log levels no configured constraint
// ever allows, so the logging hot path can discard them without locking.
func (cLogger *commonLogger) fillUnusedLevels() {
	for i := range cLogger.unusedLevels {
		cLogger.unusedLevels[i] = true
	}
	cLogger.fillUnusedLevelsByContraint(cLogger.config.Constraints)
	for _, exception := range cLogger.config.Exceptions {
		cLogger.fillUnusedLevelsByContraint(exception)
	}
}

// fillUnusedLevelsByContraint marks as "used" every level that the given
// constraint allows.
func (cLogger *commonLogger) fillUnusedLevelsByContraint(constraint logLevelConstraints) {
	for lvl := range cLogger.unusedLevels {
		if constraint.IsAllowed(LogLevel(lvl)) {
			cLogger.unusedLevels[lvl] = false
		}
	}
}
// stackCallDepth is used to indicate the call depth of 'log' func.
// This depth level is used in the runtime.Caller(...) call. See
// common_context.go -> specifyContext, extractCallerInfo for details.
func (cLogger *commonLogger) log(level LogLevel, message fmt.Stringer, stackCallDepth int) {
	// Fast path: skip all work (including taking the mutex) for levels
	// that no configured constraint could ever allow.
	if cLogger.unusedLevels[level] {
		return
	}
	cLogger.m.Lock()
	defer cLogger.m.Unlock()
	// Re-check under the lock: a concurrent Close makes the logger unusable.
	if cLogger.Closed() {
		return
	}
	context, _ := specifyContext(stackCallDepth+cLogger.addStackDepth, cLogger.customContext)
	// Context errors are not reported because there are situations
	// in which context errors are normal Seelog usage cases. For
	// example in executables with stripped symbols.
	// Error contexts are returned instead. See common_context.go.
	/*if err != nil {
		reportInternalError(err)
		return
	}*/
	cLogger.innerLogger.innerLog(level, context, message)
}
// processLogMsg runs the configured filters for (level, context) and, when
// allowed, renders the message and hands it to the root dispatcher. Panics
// from formatters or receivers are contained and reported to stderr so a
// misbehaving receiver cannot take down the host application.
func (cLogger *commonLogger) processLogMsg(level LogLevel, message fmt.Stringer, context LogContextInterface) {
	defer func() {
		if err := recover(); err != nil {
			reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err))
		}
	}()
	if cLogger.config.IsAllowed(level, context) {
		cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError)
	}
}
// isAllowed reports whether a message at 'level' from 'context' passes the
// configured constraints, caching each verdict per file path / func / level
// so repeated checks from the same call site are cheap map lookups.
func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool {
	path := context.FullPath()
	funcMap, found := cLogger.contextCache[path]
	if !found {
		funcMap = make(map[string]map[LogLevel]bool)
		cLogger.contextCache[path] = funcMap
	}
	fn := context.Func()
	levelMap, found := funcMap[fn]
	if !found {
		levelMap = make(map[LogLevel]bool)
		funcMap[fn] = levelMap
	}
	allowed, cached := levelMap[level]
	if !cached {
		allowed = cLogger.config.IsAllowed(level, context)
		levelMap[level] = allowed
	}
	return allowed
}
// logMessage renders its operands with fmt.Sprint semantics on demand.
type logMessage struct {
	params []interface{}
}

// logFormattedMessage renders a printf-style format string on demand.
type logFormattedMessage struct {
	format string
	params []interface{}
}

// newLogMessage wraps plain operands in a lazily-rendered fmt.Stringer.
func newLogMessage(params []interface{}) fmt.Stringer {
	return &logMessage{params: params}
}

// newLogFormattedMessage wraps a format string and its operands in a
// lazily-rendered message.
func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage {
	return &logFormattedMessage{format: format, params: params}
}

// String renders the operands exactly as fmt.Sprint would.
func (m *logMessage) String() string {
	return fmt.Sprint(m.params...)
}

// String renders the format string exactly as fmt.Sprintf would.
func (m *logFormattedMessage) String() string {
	return fmt.Sprintf(m.format, m.params...)
}

View File

@ -0,0 +1,161 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"bufio"
"errors"
"fmt"
"io"
"sync"
"time"
)
// bufferedWriter wraps an io.Writer with an in-memory buffer that is
// flushed when it fills up and, optionally, on a fixed period.
type bufferedWriter struct {
	flushPeriod time.Duration // interval between periodic flushes, in nanoseconds (0 disables)
	bufferMutex *sync.Mutex   // guards all buffer operations
	innerWriter io.Writer     // destination writer
	buffer      *bufio.Writer // buffered front-end for innerWriter
	bufferSize  int           // maximum buffered chunk size in bytes
}

// NewBufferedWriter creates a buffered writer around innerWriter.
// bufferSize is the memory buffer size in bytes; flushPeriod is the
// periodic flush interval in milliseconds (0 turns periodic flushing off).
func NewBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) {
	if innerWriter == nil {
		return nil, errors.New("argument is nil: innerWriter")
	}
	if flushPeriod < 0 {
		return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod)
	}
	if bufferSize <= 0 {
		return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize)
	}
	bw := &bufferedWriter{
		innerWriter: innerWriter,
		buffer:      bufio.NewWriterSize(innerWriter, bufferSize),
		bufferSize:  bufferSize,
		flushPeriod: flushPeriod * 1e6, // caller passes milliseconds; store nanoseconds
		bufferMutex: new(sync.Mutex),
	}
	if flushPeriod != 0 {
		go bw.flushPeriodically()
	}
	return bw, nil
}

// writeBigChunk handles payloads larger than the whole buffer: pending
// buffered data is flushed first, then the chunk goes straight to the
// inner writer.
func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) {
	pending := bufWriter.buffer.Buffered()
	n, err = bufWriter.flushInner()
	if err != nil {
		return n, err
	}
	written, writeErr := bufWriter.innerWriter.Write(bytes)
	return pending + written, writeErr
}

// Write buffers the payload, flushing first when it would not fit; chunks
// larger than the buffer itself bypass it entirely.
func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) {
	bufWriter.bufferMutex.Lock()
	defer bufWriter.bufferMutex.Unlock()
	size := len(bytes)
	if size > bufWriter.bufferSize {
		return bufWriter.writeBigChunk(bytes)
	}
	if size > bufWriter.buffer.Available() {
		n, err = bufWriter.flushInner()
		if err != nil {
			return n, err
		}
	}
	bufWriter.buffer.Write(bytes)
	return size, nil
}

// Close closes the inner writer if it implements io.Closer.
func (bufWriter *bufferedWriter) Close() error {
	if closer, ok := bufWriter.innerWriter.(io.Closer); ok {
		return closer.Close()
	}
	return nil
}

// Flush forces all buffered data out to the inner writer.
func (bufWriter *bufferedWriter) Flush() {
	bufWriter.bufferMutex.Lock()
	defer bufWriter.bufferMutex.Unlock()
	bufWriter.flushInner()
}

// flushInner drains the bufio layer and reports how many buffered bytes
// were written out. Callers must hold bufferMutex.
func (bufWriter *bufferedWriter) flushInner() (n int, err error) {
	before := bufWriter.buffer.Buffered()
	flushErr := bufWriter.buffer.Flush()
	return bufWriter.buffer.Buffered() - before, flushErr
}

// flushBuffer is the periodic-flush variant that takes the lock itself.
func (bufWriter *bufferedWriter) flushBuffer() {
	bufWriter.bufferMutex.Lock()
	defer bufWriter.bufferMutex.Unlock()
	bufWriter.buffer.Flush()
}

// flushPeriodically drains the buffer on every ticker tick. NOTE(review):
// the ticker is never stopped, so this goroutine lives for the process
// lifetime once started — confirm that is acceptable for long-lived apps.
func (bufWriter *bufferedWriter) flushPeriodically() {
	if bufWriter.flushPeriod > 0 {
		ticker := time.NewTicker(bufWriter.flushPeriod)
		for range ticker.C {
			bufWriter.flushBuffer()
		}
	}
}

// String identifies the writer in dispatcher descriptions.
func (bufWriter *bufferedWriter) String() string {
	return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod)
}

144
vendor/github.com/cihub/seelog/writers_connwriter.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"crypto/tls"
"fmt"
"io"
"net"
)
// connWriter writes log output over a stream-oriented network connection,
// optionally re-dialling before every message and/or speaking TLS.
type connWriter struct {
	innerWriter    io.WriteCloser
	reconnectOnMsg bool // dial a fresh connection for every Write
	reconnect      bool // set after a failed Write to force a re-dial
	net            string
	addr           string
	useTLS         bool
	configTLS      *tls.Config
}

// NewConnWriter creates a writer that dials addr on network netName.
// When reconnectOnMsg is true, a new connection is opened (and closed)
// for every Write.
func NewConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter {
	return &connWriter{
		net:            netName,
		addr:           addr,
		reconnectOnMsg: reconnectOnMsg,
	}
}

// newTLSWriter creates a connWriter that dials over SSL/TLS with the
// supplied configuration.
func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter {
	return &connWriter{
		net:            netName,
		addr:           addr,
		reconnectOnMsg: reconnectOnMsg,
		useTLS:         true,
		configTLS:      config,
	}
}

// Close shuts down the current connection, if one is open.
func (connWriter *connWriter) Close() error {
	if connWriter.innerWriter == nil {
		return nil
	}
	return connWriter.innerWriter.Close()
}

// Write sends bytes over the connection, dialling first when necessary.
// A failed write marks the connection for re-dial on the next call.
func (connWriter *connWriter) Write(bytes []byte) (n int, err error) {
	if connWriter.neededConnectOnMsg() {
		if err = connWriter.connect(); err != nil {
			return 0, err
		}
	}
	if connWriter.reconnectOnMsg {
		defer connWriter.innerWriter.Close()
	}
	n, err = connWriter.innerWriter.Write(bytes)
	if err != nil {
		connWriter.reconnect = true
	}
	return n, err
}

// String identifies the writer in dispatcher descriptions.
func (connWriter *connWriter) String() string {
	return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg)
}

// connect (re)establishes the connection, closing any previous one.
// Plain TCP connections get keep-alive enabled.
func (connWriter *connWriter) connect() error {
	if connWriter.innerWriter != nil {
		connWriter.innerWriter.Close()
		connWriter.innerWriter = nil
	}
	if connWriter.useTLS {
		conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS)
		if err != nil {
			return err
		}
		connWriter.innerWriter = conn
		return nil
	}
	conn, err := net.Dial(connWriter.net, connWriter.addr)
	if err != nil {
		return err
	}
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		tcpConn.SetKeepAlive(true)
	}
	connWriter.innerWriter = conn
	return nil
}

// neededConnectOnMsg reports whether Write must dial before sending.
func (connWriter *connWriter) neededConnectOnMsg() bool {
	if connWriter.reconnect {
		connWriter.reconnect = false
		return true
	}
	if connWriter.innerWriter == nil {
		return true
	}
	return connWriter.reconnectOnMsg
}

View File

@ -0,0 +1,47 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import "fmt"
// consoleWriter sends log output to the process standard output.
type consoleWriter struct {
}

// NewConsoleWriter creates a new console writer. The error return exists
// only to match the other writer constructors; it is always nil.
func NewConsoleWriter() (writer *consoleWriter, err error) {
	return new(consoleWriter), nil
}

// Write prints the payload to stdout and reports bytes written.
func (console *consoleWriter) Write(bytes []byte) (int, error) {
	return fmt.Print(string(bytes))
}

// String identifies the writer in dispatcher descriptions.
func (console *consoleWriter) String() string {
	return "Console writer"
}

92
vendor/github.com/cihub/seelog/writers_filewriter.go generated vendored Normal file
View File

@ -0,0 +1,92 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io"
"os"
"path/filepath"
)
// fileWriter appends log output to a single file, opening it lazily on the
// first Write call.
type fileWriter struct {
	innerWriter io.WriteCloser
	fileName    string
}

// NewFileWriter returns a writer for fileName. The file itself is not
// touched until the first Write; the error result is always nil.
func NewFileWriter(fileName string) (writer *fileWriter, err error) {
	return &fileWriter{fileName: fileName}, nil
}

// Close releases the underlying file, if it was ever opened. A later Write
// would reopen it.
func (fw *fileWriter) Close() error {
	if fw.innerWriter == nil {
		return nil
	}
	if err := fw.innerWriter.Close(); err != nil {
		return err
	}
	fw.innerWriter = nil
	return nil
}

// Write lazily creates the destination folder and file on the first call,
// then appends bytes to the file.
func (fw *fileWriter) Write(bytes []byte) (n int, err error) {
	if fw.innerWriter != nil {
		return fw.innerWriter.Write(bytes)
	}
	if err := fw.createFile(); err != nil {
		return 0, err
	}
	return fw.innerWriter.Write(bytes)
}

// createFile makes the parent directory (when the file name contains one)
// and opens the log file in append mode, creating it when missing.
func (fw *fileWriter) createFile() error {
	folder, _ := filepath.Split(fw.fileName)
	if len(folder) != 0 {
		if err := os.MkdirAll(folder, defaultDirectoryPermissions); err != nil {
			return err
		}
	}
	file, err := os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)
	if err != nil {
		return err
	}
	fw.innerWriter = file
	return nil
}

// String identifies this writer and its target file.
func (fw *fileWriter) String() string {
	return fmt.Sprintf("File writer: %s", fw.fileName)
}

View File

@ -0,0 +1,62 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"errors"
"fmt"
"io"
)
// formattedWriter pairs a destination writer with a formatter and renders
// every message through the formatter before writing it out.
type formattedWriter struct {
	writer    io.Writer
	formatter *formatter
}

// NewFormattedWriter wires writer and formatter together. A nil formatter
// is rejected, since Write would have nothing to render with.
func NewFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) {
	if formatter == nil {
		return nil, errors.New("formatter can not be nil")
	}
	return &formattedWriter{writer: writer, formatter: formatter}, nil
}

// Write formats the message for the given level/context and forwards the
// rendered text to the underlying writer.
func (fw *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error {
	formatted := fw.formatter.Format(message, level, context)
	_, err := fw.writer.Write([]byte(formatted))
	return err
}

// String describes the writer/format pair.
func (fw *formattedWriter) String() string {
	return fmt.Sprintf("writer: %s, format: %s", fw.writer, fw.formatter)
}

// Writer exposes the wrapped destination writer.
func (fw *formattedWriter) Writer() io.Writer {
	return fw.writer
}

// Format exposes the wrapped formatter.
func (fw *formattedWriter) Format() *formatter {
	return fw.formatter
}

View File

@ -0,0 +1,782 @@
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/cihub/seelog/archive"
"github.com/cihub/seelog/archive/gzip"
"github.com/cihub/seelog/archive/tar"
"github.com/cihub/seelog/archive/zip"
)
// Common constants
const (
	// rollingLogHistoryDelimiter separates the base log name from the roll
	// marker in history file names (e.g. "file.log.3").
	rollingLogHistoryDelimiter = "."
)

// Types of the rolling writer: roll by date, by time, etc.
type rollingType uint8

const (
	rollingTypeSize = iota
	rollingTypeTime
)

// Types of the rolled file naming mode: prefix, postfix, etc.
type rollingNameMode uint8

const (
	rollingNameModePostfix = iota
	rollingNameModePrefix
)

// rollingNameModesStringRepresentation maps each naming mode to the string
// used for it in configuration.
var rollingNameModesStringRepresentation = map[rollingNameMode]string{
	rollingNameModePostfix: "postfix",
	rollingNameModePrefix:  "prefix",
}

// rollingNameModeFromString performs the reverse lookup of
// rollingNameModesStringRepresentation; the bool is false for unknown strings.
func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) {
	for tp, tpStr := range rollingNameModesStringRepresentation {
		if tpStr == rollingNameStr {
			return tp, true
		}
	}
	return 0, false
}

// rollingTypesStringRepresentation maps each rolling type to its
// configuration string. Note that rollingTypeTime is spelled "date".
var rollingTypesStringRepresentation = map[rollingType]string{
	rollingTypeSize: "size",
	rollingTypeTime: "date",
}

// rollingTypeFromString performs the reverse lookup of
// rollingTypesStringRepresentation; the bool is false for unknown strings.
func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) {
	for tp, tpStr := range rollingTypesStringRepresentation {
		if tpStr == rollingTypeStr {
			return tp, true
		}
	}
	return 0, false
}

// Old logs archivation type.
type rollingArchiveType uint8

const (
	rollingArchiveNone = iota
	rollingArchiveZip
	rollingArchiveGzip
)

// rollingArchiveTypesStringRepresentation maps each archive type to its
// configuration string.
var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{
	rollingArchiveNone: "none",
	rollingArchiveZip:  "zip",
	rollingArchiveGzip: "gzip",
}
// archiver wraps an open archive file in a format-specific writing facade.
// The exploded flag tells formats that cannot hold several entries (gzip)
// whether each roll gets its own archive or everything is folded into one
// tar stream.
type archiver func(f *os.File, exploded bool) archive.WriteCloser

// unarchiver opens an existing archive file for reading its entries.
type unarchiver func(f *os.File) (archive.ReadCloser, error)

// compressionType describes one supported archive format.
type compressionType struct {
	extension             string // file extension, including the dot
	handleMultipleEntries bool   // true if the format natively stores many files (zip)
	archiver              archiver
	unarchiver            unarchiver
}
// compressionTypes holds the archiving/unarchiving implementations for
// every supported archive format.
var compressionTypes = map[rollingArchiveType]compressionType{
	rollingArchiveZip: {
		extension:             ".zip",
		handleMultipleEntries: true,
		archiver: func(f *os.File, _ bool) archive.WriteCloser {
			return zip.NewWriter(f)
		},
		unarchiver: func(f *os.File) (archive.ReadCloser, error) {
			fi, err := f.Stat()
			if err != nil {
				return nil, err
			}
			r, err := zip.NewReader(f, fi.Size())
			if err != nil {
				return nil, err
			}
			return archive.NopCloser(r), nil
		},
	},
	rollingArchiveGzip: {
		extension:             ".gz",
		handleMultipleEntries: false,
		archiver: func(f *os.File, exploded bool) archive.WriteCloser {
			gw := gzip.NewWriter(f)
			if exploded {
				return gw
			}
			// Non-exploded gzip archives wrap a tar stream so they can
			// hold multiple entries.
			return tar.NewWriteMultiCloser(gw, gw)
		},
		unarchiver: func(f *os.File) (archive.ReadCloser, error) {
			gr, err := gzip.NewReader(f, f.Name())
			if err != nil {
				return nil, err
			}

			// Determine if the gzip stream wraps a tar archive by probing
			// for a first tar header.
			tr := tar.NewReader(gr)
			_, err = tr.Next()
			isTar := err == nil

			// Rewind to the beginning of the file before the real read.
			// io.SeekStart replaces the deprecated os.SEEK_SET constant.
			if _, err := f.Seek(0, io.SeekStart); err != nil {
				return nil, err
			}
			gr.Reset(f)

			if isTar {
				return archive.NopCloser(tar.NewReader(gr)), nil
			}
			return gr, nil
		},
	},
}
// rollingArchiveTypeName builds the archive file name for the given base
// name. Formats that cannot hold several entries (gzip) get an extra
// ".tar" suffix when the rolls are bundled together (not exploded).
func (compressionType *compressionType) rollingArchiveTypeName(name string, exploded bool) string {
	if compressionType.handleMultipleEntries || exploded {
		return name + compressionType.extension
	}
	return name + ".tar" + compressionType.extension
}

// rollingArchiveTypeFromString maps a configuration string back to its
// archive type; the bool is false for unknown strings.
func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {
	for atype, repr := range rollingArchiveTypesStringRepresentation {
		if repr == rollingArchiveTypeStr {
			return atype, true
		}
	}
	return 0, false
}

// Default names for different archive types
var rollingArchiveDefaultExplodedName = "old"

// rollingArchiveTypeDefaultName returns the default archive file name for
// the given archive type, or an error for types without a compression entry.
func rollingArchiveTypeDefaultName(archiveType rollingArchiveType, exploded bool) (string, error) {
	ct, ok := compressionTypes[archiveType]
	if !ok {
		return "", fmt.Errorf("cannot get default filename for archive type = %v", archiveType)
	}
	return ct.rollingArchiveTypeName("log", exploded), nil
}
// rollInfo carries the identifying data of a single roll: the marker name
// (counter or formatted date) and the time the roll happened.
type rollInfo struct {
	Name string
	Time time.Time
}

// rollerVirtual is an interface that represents all virtual funcs that are
// called in different rolling writer subtypes.
type rollerVirtual interface {
	needsToRoll(lastRollTime time.Time) (bool, error)   // Returns true if needs to switch to another file.
	isFileRollNameValid(rname string) bool              // Returns true if logger roll file name (postfix/prefix/etc.) is ok.
	sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger.

	// Creates a new roll history file using the contents of current file and special filename of the latest roll (prefix/ postfix).
	// If lastRollName is empty (""), then it means that there is no latest roll (current is the first one)
	getNewHistoryRollFileName(lastRoll rollInfo) string

	getCurrentFileName() string
}
// rollingFileWriter writes received messages to a file, until time interval passes
// or file exceeds a specified limit. After that the current log file is renamed
// and writer starts to log into a new file. You can set a limit for such renamed
// files count, if you want, and then the rolling writer would delete older ones when
// the files count exceed the specified limit.
type rollingFileWriter struct {
	fileName        string // log file name
	currentDirPath  string // directory holding the log file and its rolls
	currentFile     *os.File
	currentName     string // name of the file currently written to
	currentFileSize int64  // running byte count, used by the size roller
	rollingType     rollingType // Rolling mode (Files roll by size/date/...)
	archiveType     rollingArchiveType
	archivePath     string
	archiveExploded bool // one archive per roll vs. one combined archive
	fullName        bool // time roller: embed the time pattern in the current file name
	maxRolls        int  // max rolls kept before archiving/deletion (<=0 disables)
	nameMode        rollingNameMode
	self            rollerVirtual // Used for virtual calls
}
// newRollingFileWriter initializes the part shared by all rolling writers.
// fpath is split into directory and file name; an empty directory defaults
// to the current one.
func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode,
	archiveExploded bool, fullName bool) (*rollingFileWriter, error) {
	dir, file := filepath.Split(fpath)
	if dir == "" {
		dir = "."
	}
	return &rollingFileWriter{
		fileName:        file,
		currentDirPath:  dir,
		rollingType:     rtype,
		archiveType:     atype,
		archivePath:     apath,
		nameMode:        namemode,
		maxRolls:        maxr,
		archiveExploded: archiveExploded,
		fullName:        fullName,
	}, nil
}
// hasRollName reports whether file looks like a roll of this writer's log,
// i.e. carries the history delimiter on the side dictated by the name mode.
func (rw *rollingFileWriter) hasRollName(file string) bool {
	switch rw.nameMode {
	case rollingNameModePostfix:
		return strings.HasPrefix(file, rw.fileName+rollingLogHistoryDelimiter)
	case rollingNameModePrefix:
		return strings.HasSuffix(file, rollingLogHistoryDelimiter+rw.fileName)
	}
	return false
}

// createFullFileName combines a base log name and a roll marker according
// to the naming mode. Unknown modes yield an empty string.
func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string {
	switch rw.nameMode {
	case rollingNameModePostfix:
		return originalName + rollingLogHistoryDelimiter + rollname
	case rollingNameModePrefix:
		return rollname + rollingLogHistoryDelimiter + originalName
	}
	return ""
}
// getSortedLogHistory lists previous roll files of this log in the current
// directory — keeping only names the concrete roller recognizes as valid —
// sorted oldest-first. The current log file itself is not included.
func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) {
	files, err := getDirFilePaths(rw.currentDirPath, nil, true)
	if err != nil {
		return nil, err
	}
	var validRollNames []string
	for _, file := range files {
		if rw.hasRollName(file) {
			rname := rw.getFileRollName(file)
			if rw.self.isFileRollNameValid(rname) {
				validRollNames = append(validRollNames, rname)
			}
		}
	}
	// Sort by roll marker (numeric or chronological, per concrete roller).
	sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames)
	if err != nil {
		return nil, err
	}
	// Re-attach the base log name to each sorted marker.
	validSortedFiles := make([]string, len(sortedTails))
	for i, v := range sortedTails {
		validSortedFiles[i] = rw.createFullFileName(rw.fileName, v)
	}
	return validSortedFiles, nil
}
// createFileAndFolderIfNeeded ensures the log directory exists and opens
// the current log file, appending to an existing file or creating a fresh
// one. It also (re)initializes currentFileSize, which the size roller
// relies on. The boolean parameter is unused but kept for call-site
// compatibility.
//
// Fix: the original called os.Lstat twice — once discarding the FileInfo
// and once to read the size after opening. A single stat is sufficient.
func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error {
	if len(rw.currentDirPath) != 0 {
		if err := os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions); err != nil {
			return err
		}
	}
	rw.currentName = rw.self.getCurrentFileName()
	filePath := filepath.Join(rw.currentDirPath, rw.currentName)

	stat, err := os.Lstat(filePath)
	if err != nil {
		// No existing file: start a new, empty log.
		rw.currentFile, err = os.Create(filePath)
		rw.currentFileSize = 0
		return err
	}

	// Existing file: append to it and pick up its current size.
	rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, defaultFilePermissions)
	if err != nil {
		return err
	}
	rw.currentFileSize = stat.Size()
	return nil
}
// archiveExplodedLogs compresses a single roll file into its own archive
// under archivePath (a directory in exploded mode). The archive is first
// written to a temp file on the same partition and then renamed into place,
// so a crash cannot leave a half-written archive at the final path.
func (rw *rollingFileWriter) archiveExplodedLogs(logFilename string, compressionType compressionType) (err error) {
	// Record the first Close error into the named return value.
	closeWithError := func(c io.Closer) {
		if cerr := c.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}

	rollPath := filepath.Join(rw.currentDirPath, logFilename)
	src, err := os.Open(rollPath)
	if err != nil {
		return err
	}
	defer src.Close() // Read-only

	// Buffer to a temporary file on the same partition
	// Note: archivePath is a path to a directory when handling exploded logs
	dst, err := rw.tempArchiveFile(rw.archivePath)
	if err != nil {
		return err
	}
	defer func() {
		closeWithError(dst)
		if err != nil {
			os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file
			return
		}

		// Finalize archive by swapping the buffered archive into place
		err = os.Rename(dst.Name(), filepath.Join(rw.archivePath,
			compressionType.rollingArchiveTypeName(logFilename, true)))
	}()

	// archive entry
	w := compressionType.archiver(dst, true)
	defer closeWithError(w)
	fi, err := src.Stat()
	if err != nil {
		return err
	}
	if err := w.NextFile(logFilename, fi); err != nil {
		return err
	}
	_, err = io.Copy(w, src)
	return err
}
// archiveUnexplodedLogs folds the rolls that are about to be deleted into a
// single archive at archivePath (a file path in unexploded mode), merging
// in the entries of an existing archive when present. Like the exploded
// variant, output is buffered to a temp file and renamed into place.
func (rw *rollingFileWriter) archiveUnexplodedLogs(compressionType compressionType, rollsToDelete int, history []string) (err error) {
	// Record the first Close error into the named return value.
	closeWithError := func(c io.Closer) {
		if cerr := c.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}

	// Buffer to a temporary file on the same partition
	// Note: archivePath is a path to a file when handling unexploded logs
	dst, err := rw.tempArchiveFile(filepath.Dir(rw.archivePath))
	if err != nil {
		return err
	}
	defer func() {
		closeWithError(dst)
		if err != nil {
			os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file
			return
		}

		// Finalize archive by moving the buffered archive into place
		err = os.Rename(dst.Name(), rw.archivePath)
	}()

	w := compressionType.archiver(dst, false)
	defer closeWithError(w)

	src, err := os.Open(rw.archivePath)
	switch {
	// Archive exists
	case err == nil:
		defer src.Close() // Read-only

		r, err := compressionType.unarchiver(src)
		if err != nil {
			return err
		}
		defer r.Close() // Read-only

		// Copy existing archive entries into the new archive first.
		if err := archive.Copy(w, r); err != nil {
			return err
		}

	// Failed to stat
	case !os.IsNotExist(err):
		return err
	}

	// Add new files to the archive
	for i := 0; i < rollsToDelete; i++ {
		rollPath := filepath.Join(rw.currentDirPath, history[i])
		src, err := os.Open(rollPath)
		if err != nil {
			return err
		}
		defer src.Close() // Read-only
		// NOTE(review): this defer runs at function return, not per loop
		// iteration, so all roll files stay open until the function ends.
		fi, err := src.Stat()
		if err != nil {
			return err
		}
		if err := w.NextFile(src.Name(), fi); err != nil {
			return err
		}
		if _, err := io.Copy(w, src); err != nil {
			return err
		}
	}
	return nil
}
// deleteOldRolls trims the roll history down to maxRolls entries, first
// archiving the excess rolls (when archiving is configured) and then
// deleting them from disk. maxRolls <= 0 disables trimming entirely.
func (rw *rollingFileWriter) deleteOldRolls(history []string) error {
	if rw.maxRolls <= 0 {
		return nil
	}

	rollsToDelete := len(history) - rw.maxRolls
	if rollsToDelete <= 0 {
		return nil
	}

	if rw.archiveType != rollingArchiveNone {
		// NOTE(review): MkdirAll and the archive calls below deliberately
		// ignore errors; the rolls are removed afterwards regardless.
		if rw.archiveExploded {
			os.MkdirAll(rw.archivePath, defaultDirectoryPermissions)

			// Archive logs
			for i := 0; i < rollsToDelete; i++ {
				rw.archiveExplodedLogs(history[i], compressionTypes[rw.archiveType])
			}
		} else {
			os.MkdirAll(filepath.Dir(rw.archivePath), defaultDirectoryPermissions)

			rw.archiveUnexplodedLogs(compressionTypes[rw.archiveType], rollsToDelete, history)
		}
	}

	var err error
	// In all cases (archive files or not) the files should be deleted.
	for i := 0; i < rollsToDelete; i++ {
		// Try best to delete files without breaking the loop.
		if err = tryRemoveFile(filepath.Join(rw.currentDirPath, history[i])); err != nil {
			reportInternalError(err)
		}
	}

	return nil
}
// getFileRollName extracts the roll marker (counter or date) from a history
// file name, undoing createFullFileName for the current naming mode.
func (rw *rollingFileWriter) getFileRollName(fileName string) string {
	fixed := len(rw.fileName + rollingLogHistoryDelimiter)
	switch rw.nameMode {
	case rollingNameModePostfix:
		return fileName[fixed:]
	case rollingNameModePrefix:
		return fileName[:len(fileName)-fixed]
	}
	return ""
}
// Write appends bytes to the current log file, first performing a roll when
// the concrete roller says one is due (size limit reached / time pattern
// ticked over): the current file is closed, renamed into the history, old
// rolls past maxRolls are archived/deleted, and a fresh file is opened.
func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) {
	if rw.currentFile == nil {
		err := rw.createFileAndFolderIfNeeded(true)
		if err != nil {
			return 0, err
		}
	}
	// needs to roll if:
	//   * file roller max file size exceeded OR
	//   * time roller interval passed
	// The file's mtime doubles as "time of the last roll".
	fi, err := rw.currentFile.Stat()
	if err != nil {
		return 0, err
	}
	lastRollTime := fi.ModTime()
	nr, err := rw.self.needsToRoll(lastRollTime)
	if err != nil {
		return 0, err
	}
	if nr {
		// First, close current file.
		err = rw.currentFile.Close()
		if err != nil {
			return 0, err
		}

		// Current history of all previous log files.
		// For file roller it may be like this:
		//     * ...
		//     * file.log.4
		//     * file.log.5
		//     * file.log.6
		//
		// For date roller it may look like this:
		//     * ...
		//     * file.log.11.Aug.13
		//     * file.log.15.Aug.13
		//     * file.log.16.Aug.13
		// Sorted log history does NOT include current file.
		history, err := rw.getSortedLogHistory()
		if err != nil {
			return 0, err
		}
		// Renames current file to create a new roll history entry
		// For file roller it may be like this:
		//     * ...
		//     * file.log.4
		//     * file.log.5
		//     * file.log.6
		//     n file.log.7  <---- RENAMED (from file.log)
		// Time rollers that doesn't modify file names (e.g. 'date' roller) skip this logic.
		var newHistoryName string
		lastRoll := rollInfo{
			Time: lastRollTime,
		}
		if len(history) > 0 {
			// Create new rname name using last history file name
			lastRoll.Name = rw.getFileRollName(history[len(history)-1])
		} else {
			// Create first rname name
			lastRoll.Name = ""
		}
		newRollMarkerName := rw.self.getNewHistoryRollFileName(lastRoll)
		if len(newRollMarkerName) != 0 {
			newHistoryName = rw.createFullFileName(rw.fileName, newRollMarkerName)
		} else {
			newHistoryName = rw.fileName
		}
		if newHistoryName != rw.fileName {
			err = os.Rename(filepath.Join(rw.currentDirPath, rw.currentName), filepath.Join(rw.currentDirPath, newHistoryName))
			if err != nil {
				return 0, err
			}
		}
		// Finally, add the newly added history file to the history archive
		// and, if after that the archive exceeds the allowed max limit, older rolls
		// must the removed/archived.
		history = append(history, newHistoryName)
		if len(history) > rw.maxRolls {
			err = rw.deleteOldRolls(history)
			if err != nil {
				return 0, err
			}
		}

		err = rw.createFileAndFolderIfNeeded(false)
		if err != nil {
			return 0, err
		}
	}

	rw.currentFileSize += int64(len(bytes))
	return rw.currentFile.Write(bytes)
}
// Close releases the current log file, if one is open.
func (rw *rollingFileWriter) Close() error {
	if rw.currentFile == nil {
		return nil
	}
	if err := rw.currentFile.Close(); err != nil {
		return err
	}
	rw.currentFile = nil
	return nil
}

// tempArchiveFile creates a scratch file inside a ".seelog_tmp" folder
// under archiveDir, so a finished archive can later be moved into place
// with a cheap same-partition rename.
func (rw *rollingFileWriter) tempArchiveFile(archiveDir string) (*os.File, error) {
	tmp := filepath.Join(archiveDir, ".seelog_tmp")
	if err := os.MkdirAll(tmp, defaultDirectoryPermissions); err != nil {
		return nil, err
	}
	return ioutil.TempFile(tmp, "archived_logs")
}
// =============================================================================================
//      Different types of rolling writers
// =============================================================================================

// --------------------------------------------------
//      Rolling writer by SIZE
// --------------------------------------------------

// rollingFileWriterSize rolls the log when the current file grows past a
// byte-size limit.
type rollingFileWriterSize struct {
	*rollingFileWriter
	maxFileSize int64
}

// NewRollingFileWriterSize builds a size-based rolling writer.
func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode, archiveExploded bool) (*rollingFileWriterSize, error) {
	base, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode, archiveExploded, false)
	if err != nil {
		return nil, err
	}
	rws := &rollingFileWriterSize{base, maxSize}
	rws.self = rws
	return rws, nil
}

// needsToRoll reports whether the current file has reached the size limit.
func (rws *rollingFileWriterSize) needsToRoll(lastRollTime time.Time) (bool, error) {
	return rws.currentFileSize >= rws.maxFileSize, nil
}

// isFileRollNameValid accepts only non-empty, purely numeric roll markers.
func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {
	if rname == "" {
		return false
	}
	_, err := strconv.Atoi(rname)
	return err == nil
}
// rollSizeFileTailsSlice sorts numeric roll markers by integer value rather
// than lexicographically (so "10" sorts after "9").
type rollSizeFileTailsSlice []string

func (p rollSizeFileTailsSlice) Len() int { return len(p) }

func (p rollSizeFileTailsSlice) Less(i, j int) bool {
	left, _ := strconv.Atoi(p[i])
	right, _ := strconv.Atoi(p[j])
	return left < right
}

func (p rollSizeFileTailsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// sortFileRollNamesAsc orders numeric roll markers ascending.
func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) {
	tails := rollSizeFileTailsSlice(fs)
	sort.Sort(tails)
	return tails, nil
}

// getNewHistoryRollFileName returns the next counter value after the latest
// roll, starting from "1" when there is no history yet.
func (rws *rollingFileWriterSize) getNewHistoryRollFileName(lastRoll rollInfo) string {
	last := 0
	if lastRoll.Name != "" {
		last, _ = strconv.Atoi(lastRoll.Name)
	}
	return fmt.Sprintf("%d", last+1)
}

// getCurrentFileName is always the plain log file name for size rolling.
func (rws *rollingFileWriterSize) getCurrentFileName() string {
	return rws.fileName
}

// String describes the writer configuration.
func (rws *rollingFileWriterSize) String() string {
	return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v",
		rws.fileName,
		rollingArchiveTypesStringRepresentation[rws.archiveType],
		rws.archivePath,
		rws.maxFileSize,
		rws.maxRolls)
}
// --------------------------------------------------
//      Rolling writer by TIME
// --------------------------------------------------

// rollingFileWriterTime rolls the log when the formatted time pattern of
// "now" differs from that of the last roll (e.g. a new day).
type rollingFileWriterTime struct {
	*rollingFileWriter
	timePattern         string
	currentTimeFileName string
}

// NewRollingFileWriterTime builds a time-based rolling writer.
func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int,
	timePattern string, namemode rollingNameMode, archiveExploded bool, fullName bool) (*rollingFileWriterTime, error) {
	base, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode, archiveExploded, fullName)
	if err != nil {
		return nil, err
	}
	rwt := &rollingFileWriterTime{base, timePattern, ""}
	rwt.self = rwt
	return rwt, nil
}

// needsToRoll reports whether the time pattern has ticked over since the
// last roll.
func (rwt *rollingFileWriterTime) needsToRoll(lastRollTime time.Time) (bool, error) {
	return time.Now().Format(rwt.timePattern) != lastRollTime.Format(rwt.timePattern), nil
}

// isFileRollNameValid accepts only non-empty markers parseable with this
// writer's time pattern.
func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool {
	if rname == "" {
		return false
	}
	_, err := time.ParseInLocation(rwt.timePattern, rname, time.Local)
	return err == nil
}
// rollTimeFileTailsSlice sorts date/time roll markers chronologically
// according to the configured pattern.
type rollTimeFileTailsSlice struct {
	data    []string
	pattern string
}

func (p rollTimeFileTailsSlice) Len() int { return len(p.data) }

func (p rollTimeFileTailsSlice) Less(i, j int) bool {
	left, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local)
	right, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local)
	return left.Before(right)
}

func (p rollTimeFileTailsSlice) Swap(i, j int) { p.data[i], p.data[j] = p.data[j], p.data[i] }
// sortFileRollNamesAsc orders roll markers from oldest to newest.
func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) {
	tails := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern}
	sort.Sort(tails)
	return tails.data, nil
}
// getNewHistoryRollFileName derives the roll marker from the time of the
// last roll, formatted with this writer's pattern.
func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(lastRoll rollInfo) string {
	return lastRoll.Time.Format(rwt.timePattern)
}

// getCurrentFileName embeds the current time into the file name when
// fullName is set; otherwise it is the plain log file name.
func (rwt *rollingFileWriterTime) getCurrentFileName() string {
	if !rwt.fullName {
		return rwt.fileName
	}
	return rwt.createFullFileName(rwt.fileName, time.Now().Format(rwt.timePattern))
}

// String describes the writer configuration.
func (rwt *rollingFileWriterTime) String() string {
	return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, pattern: %s, maxRolls: %v",
		rwt.fileName,
		rollingArchiveTypesStringRepresentation[rwt.archiveType],
		rwt.archivePath,
		rwt.timePattern,
		rwt.maxRolls)
}

214
vendor/github.com/cihub/seelog/writers_smtpwriter.go generated vendored Normal file
View File

@ -0,0 +1,214 @@
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"net/smtp"
"path/filepath"
"strings"
)
const (
	// DefaultSubjectPhrase is the default subject phrase for sending emails.
	DefaultSubjectPhrase = "Diagnostic message from server: "

	// rfc5321SubjectPattern is the message subject pattern composed
	// according to RFC 5321.
	rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n\n"
)

// smtpWriter delivers log messages as emails through a given SMTP server.
type smtpWriter struct {
	auth               smtp.Auth
	hostName           string
	hostPort           string
	hostNameWithPort   string
	senderAddress      string
	senderName         string
	recipientAddresses []string
	caCertDirPaths     []string
	mailHeaders        []string
	subject            string
}

// NewSMTPWriter returns a new SMTP-writer configured with plain
// authentication against the given host.
func NewSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter {
	hostWithPort := fmt.Sprintf("%s:%s", hn, hp)
	return &smtpWriter{
		auth:               smtp.PlainAuth("", un, pwd, hn),
		hostName:           hn,
		hostPort:           hp,
		hostNameWithPort:   hostWithPort,
		senderAddress:      sa,
		senderName:         sn,
		recipientAddresses: ras,
		caCertDirPaths:     cacdps,
		subject:            subj,
		mailHeaders:        headers,
	}
}
// prepareMessage composes the outgoing message: the standard From/Subject
// header block, any extra configured header lines, a blank line, then the
// body bytes.
func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte {
	headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject)
	// Append extra header lines when configured. len(nil) == 0, so the
	// original's separate nil check was redundant (staticcheck S1009).
	if len(headers) > 0 {
		headerLines += strings.Join(headers, "\n")
		headerLines += "\n"
	}
	return append([]byte(headerLines), body...)
}
// getTLSConfig reads all ".pem" files found in the given directories and
// builds a tls.Config whose root CA pool contains the certificates they
// hold, with ServerName set to hostName.
func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) {
	// len(nil) == 0, so the original's extra nil check was redundant
	// (staticcheck S1009).
	if len(pemFileDirPaths) == 0 {
		err = errors.New("invalid PEM file paths")
		return
	}
	pemEncodedContent := []byte{}

	// Create a file-filter-by-extension, set aside non-pem files.
	pemFilePathFilter := func(fp string) bool {
		return filepath.Ext(fp) == ".pem"
	}
	for _, pemFileDirPath := range pemFileDirPaths {
		pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false)
		if err != nil {
			return nil, err
		}

		// Put together all the PEM files to decode them as a whole byte slice.
		for _, pfp := range pemFilePaths {
			bytes, e := ioutil.ReadFile(pfp)
			if e != nil {
				return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error())
			}
			pemEncodedContent = append(pemEncodedContent, bytes...)
		}
	}
	config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName}
	if !config.RootCAs.AppendCertsFromPEM(pemEncodedContent) {
		// Extract this into a separate error.
		err = errors.New("invalid PEM content")
		return
	}
	return
}
// sendMailWithTLSConfig accepts TLS configuration, connects to the server at addr,
// switches to TLS if possible, authenticates with mechanism a if possible,
// and then sends an email from address from, to addresses to, with message msg.
//
// NOTE(review): early error returns skip c.Quit()/c.Close(), leaving the
// connection to be closed by the server or GC — confirm this is acceptable.
func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error {
	c, err := smtp.Dial(addr)
	if err != nil {
		return err
	}
	// Check if the server supports STARTTLS extension.
	if ok, _ := c.Extension("STARTTLS"); ok {
		if err = c.StartTLS(config); err != nil {
			return err
		}
	}
	// Check if the server supports AUTH extension and use given smtp.Auth.
	if a != nil {
		if isSupported, _ := c.Extension("AUTH"); isSupported {
			if err = c.Auth(a); err != nil {
				return err
			}
		}
	}
	// Portion of code from the official smtp.SendMail function,
	// see http://golang.org/src/pkg/net/smtp/smtp.go.
	if err = c.Mail(from); err != nil {
		return err
	}
	for _, addr := range to {
		if err = c.Rcpt(addr); err != nil {
			return err
		}
	}
	w, err := c.Data()
	if err != nil {
		return err
	}
	_, err = w.Write(msg)
	if err != nil {
		return err
	}
	err = w.Close()
	if err != nil {
		return err
	}
	return c.Quit()
}
// Write pushes a text message properly composed according to RFC 5321
// to a post server, which sends it to the recipients. Plain smtp.SendMail
// is used unless CA certificate directories are configured, in which case
// a TLS-enabled send is performed with those root CAs.
func (smtpw *smtpWriter) Write(data []byte) (int, error) {
	var err error

	if smtpw.caCertDirPaths == nil {
		err = smtp.SendMail(
			smtpw.hostNameWithPort,
			smtpw.auth,
			smtpw.senderAddress,
			smtpw.recipientAddresses,
			prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
		)
	} else {
		config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName)
		if e != nil {
			return 0, e
		}
		err = sendMailWithTLSConfig(
			config,
			smtpw.hostNameWithPort,
			smtpw.auth,
			smtpw.senderAddress,
			smtpw.recipientAddresses,
			prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
		)
	}
	if err != nil {
		return 0, err
	}
	return len(data), nil
}
// Close is a no-op: Write opens and closes its SMTP connection itself, so
// there is no persistent resource to release here.
func (smtpw *smtpWriter) Close() error {
	return nil
}

20
vendor/github.com/franela/goreq/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Jonathan Leibiusky and Marcos Lilljedahl
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

3
vendor/github.com/franela/goreq/Makefile generated vendored Normal file
View File

@ -0,0 +1,3 @@
test:
go get -v -d -t ./...
go test -v

444
vendor/github.com/franela/goreq/README.md generated vendored Normal file
View File

@ -0,0 +1,444 @@
[![Build Status](https://img.shields.io/travis/franela/goreq/master.svg)](https://travis-ci.org/franela/goreq)
[![GoDoc](https://godoc.org/github.com/franela/goreq?status.svg)](https://godoc.org/github.com/franela/goreq)
GoReq
=======
Simple and sane HTTP request library for Go language.
**Table of Contents**
- [Why GoReq?](#user-content-why-goreq)
- [How do I install it?](#user-content-how-do-i-install-it)
- [What can I do with it?](#user-content-what-can-i-do-with-it)
- [Making requests with different methods](#user-content-making-requests-with-different-methods)
- [GET](#user-content-get)
- [Tags](#user-content-tags)
- [POST](#user-content-post)
- [Sending payloads in the Body](#user-content-sending-payloads-in-the-body)
- [Specifying request headers](#user-content-specifiying-request-headers)
- [Sending Cookies](#cookie-support)
- [Setting timeouts](#user-content-setting-timeouts)
- [Using the Response and Error](#user-content-using-the-response-and-error)
- [Receiving JSON](#user-content-receiving-json)
- [Sending/Receiving Compressed Payloads](#user-content-sendingreceiving-compressed-payloads)
- [Using gzip compression:](#user-content-using-gzip-compression)
- [Using deflate compression:](#user-content-using-deflate-compression)
- [Using compressed responses:](#user-content-using-compressed-responses)
- [Proxy](#proxy)
- [Debugging requests](#debug)
- [Getting raw Request & Response](#getting-raw-request--response)
- [TODO:](#user-content-todo)
Why GoReq?
==========
Go has very nice native libraries that allow you to do lots of cool things. But sometimes those libraries are too low level, which means that doing a simple thing, like an HTTP request, takes some time. And if you want to do something as simple as adding a timeout to a request, you will end up writing several lines of code.
This is why we think GoReq is useful. Because you can do all your HTTP requests in a very simple and comprehensive way, while enabling you to do more advanced stuff by giving you access to the native API.
How do I install it?
====================
```bash
go get github.com/franela/goreq
```
What can I do with it?
======================
## Making requests with different methods
#### GET
```go
res, err := goreq.Request{ Uri: "http://www.google.com" }.Do()
```
GoReq default method is GET.
You can also set value to GET method easily
```go
type Item struct {
Limit int
Skip int
Fields string
}
item := Item {
Limit: 3,
Skip: 5,
Fields: "Value",
}
res, err := goreq.Request{
Uri: "http://localhost:3000/",
QueryString: item,
}.Do()
```
The sample above will send `http://localhost:3000/?limit=3&skip=5&fields=Value`
Alternatively the `url` tag can be used in struct fields to customize encoding properties
```go
type Item struct {
TheLimit int `url:"the_limit"`
TheSkip string `url:"the_skip,omitempty"`
TheFields string `url:"-"`
}
item := Item {
TheLimit: 3,
TheSkip: "",
TheFields: "Value",
}
res, err := goreq.Request{
Uri: "http://localhost:3000/",
QueryString: item,
}.Do()
```
The sample above will send `http://localhost:3000/?the_limit=3`
QueryString also support url.Values
```go
item := url.Values{}
item.Set("Limit", 3)
item.Add("Field", "somefield")
item.Add("Field", "someotherfield")
res, err := goreq.Request{
Uri: "http://localhost:3000/",
QueryString: item,
}.Do()
```
The sample above will send `http://localhost:3000/?limit=3&field=somefield&field=someotherfield`
### Tags
Struct field `url` tag is mainly used as the request parameter name.
Tags can be comma separated multiple values, 1st value is for naming and rest has special meanings.
- special tag for 1st value
- `-`: value is ignored if set this
- special tag for rest 2nd value
- `omitempty`: zero-value is ignored if set this
- `squash`: the fields of embedded struct is used for parameter
#### Tag Examples
```go
type Place struct {
Country string `url:"country"`
City string `url:"city"`
ZipCode string `url:"zipcode,omitempty"`
}
type Person struct {
Place `url:",squash"`
FirstName string `url:"first_name"`
LastName string `url:"last_name"`
Age string `url:"age,omitempty"`
Password string `url:"-"`
}
johnbull := Person{
Place: Place{ // squash the embedded struct value
Country: "UK",
City: "London",
ZipCode: "SW1",
},
FirstName: "John",
LastName: "Doe",
Age: "35",
Password: "my-secret", // ignored for parameter
}
goreq.Request{
Uri: "http://localhost/",
QueryString: johnbull,
}.Do()
// => `http://localhost/?first_name=John&last_name=Doe&age=35&country=UK&city=London&zip_code=SW1`
// age and zipcode will be ignored because of `omitempty`
// but firstname isn't.
samurai := Person{
Place: Place{ // squash the embedded struct value
Country: "Japan",
City: "Tokyo",
},
LastName: "Yagyu",
}
goreq.Request{
Uri: "http://localhost/",
QueryString: samurai,
}.Do()
// => `http://localhost/?first_name=&last_name=yagyu&country=Japan&city=Tokyo`
```
#### POST
```go
res, err := goreq.Request{ Method: "POST", Uri: "http://www.google.com" }.Do()
```
## Sending payloads in the Body
You can send ```string```, ```Reader``` or ```interface{}``` in the body. The first two will be sent as text. The last one will be marshalled to JSON, if possible.
```go
type Item struct {
Id int
Name string
}
item := Item{ Id: 1111, Name: "foobar" }
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
}.Do()
```
## Specifiying request headers
We think that most of the times the request headers that you use are: ```Host```, ```Content-Type```, ```Accept``` and ```User-Agent```. This is why we decided to make it very easy to set these headers.
```go
res, err := goreq.Request{
Uri: "http://www.google.com",
Host: "foobar.com",
Accept: "application/json",
ContentType: "application/json",
UserAgent: "goreq",
}.Do()
```
But sometimes you need to set other headers. You can still do it.
```go
req := goreq.Request{ Uri: "http://www.google.com" }
req.AddHeader("X-Custom", "somevalue")
req.Do()
```
Alternatively you can use the `WithHeader` function to keep the syntax short
```go
res, err = goreq.Request{ Uri: "http://www.google.com" }.WithHeader("X-Custom", "somevalue").Do()
```
## Cookie support
Cookies can be either set at the request level by sending a [CookieJar](http://golang.org/pkg/net/http/cookiejar/) in the `CookieJar` request field
or you can use goreq's one-liner WithCookie method as shown below
```go
res, err := goreq.Request{
Uri: "http://www.google.com",
}.
WithCookie(&http.Cookie{Name: "c1", Value: "v1"}).
Do()
```
## Setting timeouts
GoReq supports two kinds of timeouts: a general connection timeout and a request-specific one. By default the connection timeout is 1 second. There is no default for the request timeout, which means it will wait forever.
You can change the connection timeout doing:
```go
goreq.SetConnectTimeout(100 * time.Millisecond)
```
And specify the request timeout doing:
```go
res, err := goreq.Request{
Uri: "http://www.google.com",
Timeout: 500 * time.Millisecond,
}.Do()
```
## Using the Response and Error
GoReq will always return 2 values: a ```Response``` and an ```Error```.
If ```Error``` is not ```nil``` it means that an error happened while doing the request and you shouldn't use the ```Response``` in any way.
You can check what happened by getting the error message:
```go
fmt.Println(err.Error())
```
And to make it easy to know if it was a timeout error, you can ask the error or return it:
```go
if serr, ok := err.(*goreq.Error); ok {
if serr.Timeout() {
...
}
}
return err
```
If you don't get an error, you can safely use the ```Response```.
```go
res.Uri // return final URL location of the response (fulfilled after redirect was made)
res.StatusCode // return the status code of the response
res.Body // gives you access to the body
res.Body.ToString() // will return the body as a string
res.Header.Get("Content-Type") // gives you access to all the response headers
```
Remember that you should **always** close `res.Body` if it's not `nil`
## Receiving JSON
GoReq will help you to receive and unmarshal JSON.
```go
type Item struct {
Id int
Name string
}
var item Item
res.Body.FromJsonTo(&item)
```
## Sending/Receiving Compressed Payloads
GoReq supports gzip, deflate and zlib compression of requests' body and transparent decompression of responses provided they have a correct `Content-Encoding` header.
#####Using gzip compression:
```go
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
Compression: goreq.Gzip(),
}.Do()
```
#####Using deflate/zlib compression:
```go
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
Compression: goreq.Deflate(),
}.Do()
```
#####Using compressed responses:
If the server replies with a correct and matching `Content-Encoding` header (gzip requires `Content-Encoding: gzip` and deflate `Content-Encoding: deflate`) goreq transparently decompresses the response, so the previous example should always work:
```go
type Item struct {
Id int
Name string
}
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
Compression: goreq.Gzip(),
}.Do()
var item Item
res.Body.FromJsonTo(&item)
```
If the server does not reply with a `Content-Encoding` header, GoReq will return the raw, undecoded response.
## Proxy
If you need to use a proxy for your requests GoReq supports the standard `http_proxy` env variable as well as manually setting the proxy for each request
```go
res, err := goreq.Request{
Method: "GET",
Proxy: "http://myproxy:myproxyport",
Uri: "http://www.google.com",
}.Do()
```
### Proxy basic auth is also supported
```go
res, err := goreq.Request{
Method: "GET",
Proxy: "http://user:pass@myproxy:myproxyport",
Uri: "http://www.google.com",
}.Do()
```
## Debug
If you need to debug your http requests, it can print the http request detail.
```go
res, err := goreq.Request{
Method: "GET",
Uri: "http://www.google.com",
Compression: goreq.Gzip(),
ShowDebug: true,
}.Do()
fmt.Println(res, err)
```
and it will print the log:
```
GET / HTTP/1.1
Host: www.google.com
Accept:
Accept-Encoding: gzip
Content-Encoding: gzip
Content-Type:
```
### Getting raw Request & Response
To get the Request:
```go
req := goreq.Request{
Host: "foobar.com",
}
//req.Request will return a new instance of an http.Request so you can safely use it for something else
request, _ := req.NewRequest()
```
To get the Response:
```go
res, err := goreq.Request{
Method: "GET",
Uri: "http://www.google.com",
Compression: goreq.Gzip(),
ShowDebug: true,
}.Do()
// res.Response will contain the original http.Response structure
fmt.Println(res.Response, err)
```
TODO:
-----
We do have a couple of [issues](https://github.com/franela/goreq/issues) pending we'll be addressing soon. But feel free to
contribute and send us PRs (with tests please :smile:).

491
vendor/github.com/franela/goreq/goreq.go generated vendored Normal file
View File

@ -0,0 +1,491 @@
package goreq
import (
"bufio"
"bytes"
"compress/gzip"
"compress/zlib"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"reflect"
"strings"
"time"
)
// itimeout matches errors (such as net.Error) that can report whether
// they were caused by a timeout.
type itimeout interface {
	Timeout() bool
}
// Request describes an HTTP request to be performed by Do. The zero
// value is usable; an empty Method defaults to "GET".
type Request struct {
	headers []headerTuple  // extra headers queued via AddHeader/WithHeader
	cookies []*http.Cookie // cookies queued via AddCookie/WithCookie

	Method            string        // HTTP verb; defaults to "GET" when empty
	Uri               string        // target URL
	Body              interface{}   // string, io.Reader, []byte, nil, or any JSON-marshalable value
	QueryString       interface{}   // url.Values, *url.Values, or a struct with `url` tags
	Timeout           time.Duration // per-request timeout; 0 means wait forever
	ContentType       string
	Accept            string
	Host              string
	UserAgent         string
	Insecure          bool // skip TLS certificate verification
	MaxRedirects      int
	RedirectHeaders   bool // copy the original request's headers onto redirects
	Proxy             string
	Compression       *compression // see Gzip, Deflate and Zlib
	BasicAuthUsername string
	BasicAuthPassword string
	CookieJar         http.CookieJar
	ShowDebug         bool // dump the outgoing request to the log
	OnBeforeRequest   func(goreq *Request, httpreq *http.Request)
}
// compression bundles the constructor pair used to compress request
// bodies and decompress response bodies, together with the
// Content-Encoding token they correspond to.
type compression struct {
	writer          func(buffer io.Writer) (io.WriteCloser, error) // wraps a sink with a compressing writer
	reader          func(buffer io.Reader) (io.ReadCloser, error)  // wraps a source with a decompressing reader
	ContentEncoding string                                         // value sent/matched in Content-Encoding
}
// Response wraps http.Response with the final request URI (after any
// redirects) and a Body that transparently decompresses when needed.
type Response struct {
	*http.Response
	Uri  string // final URL of the response, filled in after redirects
	Body *Body  // replaces http.Response.Body; callers must Close it
	req  *http.Request
}
// CancelRequest aborts the in-flight request on the default transport.
func (r Response) CancelRequest() {
	cancelRequest(DefaultTransport, r.req)
}
// cancelRequest asks transport to abort r. Transports that do not
// implement CancelRequest make this a no-op.
func cancelRequest(transport interface{}, r *http.Request) {
	canceler, ok := transport.(transportRequestCanceler)
	if !ok {
		return
	}
	canceler.CancelRequest(r)
}
// headerTuple is a single name/value header pair queued by AddHeader.
type headerTuple struct {
	name  string
	value string
}
// Body is the response payload. reader is the raw network body;
// compressedReader, when set, decompresses it and takes priority in
// Read.
type Body struct {
	reader           io.ReadCloser
	compressedReader io.ReadCloser
}
// Error wraps any failure produced while performing a Request and
// records whether it was caused by a timeout.
type Error struct {
	timeout bool
	Err     error
}
// transportRequestCanceler is satisfied by transports (such as
// *http.Transport) that can abort an in-flight request.
type transportRequestCanceler interface {
	CancelRequest(*http.Request)
}
// Timeout reports whether the wrapped error was caused by a timeout.
func (e *Error) Timeout() bool {
	return e.timeout
}
// Error returns the message of the underlying error.
func (e *Error) Error() string {
	return e.Err.Error()
}
// Read streams from the decompressing reader when one is present and
// from the raw body otherwise.
func (b *Body) Read(p []byte) (int, error) {
	if b.compressedReader != nil {
		return b.compressedReader.Read(p)
	}
	return b.reader.Read(p)
}
// Close closes the raw body and, when a decompressing reader is
// present, closes that too. NOTE(review): when both are set, the raw
// reader's close error is discarded in favour of the decompressor's.
func (b *Body) Close() error {
	err := b.reader.Close()
	if b.compressedReader != nil {
		return b.compressedReader.Close()
	}
	return err
}
// FromJsonTo decodes the (remaining) body as JSON into o.
func (b *Body) FromJsonTo(o interface{}) error {
	return json.NewDecoder(b).Decode(o)
}
// ToString reads the entire remaining body into memory and returns it
// as a string.
func (b *Body) ToString() (string, error) {
	body, err := ioutil.ReadAll(b)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// Gzip returns a compression that gzips request bodies and gunzips
// response bodies (Content-Encoding: gzip).
func Gzip() *compression {
	return &compression{
		writer: func(buffer io.Writer) (io.WriteCloser, error) {
			return gzip.NewWriter(buffer), nil
		},
		reader: func(buffer io.Reader) (io.ReadCloser, error) {
			return gzip.NewReader(buffer)
		},
		ContentEncoding: "gzip",
	}
}
// Deflate returns a compression that deflates request bodies and
// inflates response bodies using zlib framing (Content-Encoding:
// deflate).
func Deflate() *compression {
	return &compression{
		writer: func(buffer io.Writer) (io.WriteCloser, error) {
			return zlib.NewWriter(buffer), nil
		},
		reader: func(buffer io.Reader) (io.ReadCloser, error) {
			return zlib.NewReader(buffer)
		},
		ContentEncoding: "deflate",
	}
}
// Zlib is an alias for Deflate: both use compress/zlib framing.
func Zlib() *compression {
	return Deflate()
}
// paramParse encodes query into a URL query string. url.Values and
// *url.Values are encoded directly; anything else is treated as a
// struct and walked by paramParseStruct.
func paramParse(query interface{}) (string, error) {
	switch q := query.(type) {
	case url.Values:
		return q.Encode(), nil
	case *url.Values:
		return q.Encode(), nil
	default:
		values := &url.Values{}
		err := paramParseStruct(values, query)
		return values.Encode(), err
	}
}
// paramParseStruct walks the exported fields of the struct behind
// query and adds each one to v as a query parameter. The `url` tag
// controls behaviour: "-" skips a field, "omitempty" drops
// empty-string values, and "squash" flattens a nested struct's fields
// into the same parameter set.
func paramParseStruct(v *url.Values, query interface{}) error {
	var (
		s = reflect.ValueOf(query)
		t = reflect.TypeOf(query)
	)
	// Unwrap pointers/interfaces down to the concrete struct.
	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Interface {
		s = s.Elem()
		t = s.Type()
	}
	if t.Kind() != reflect.Struct {
		return errors.New("Can not parse QueryString.")
	}
	for i := 0; i < t.NumField(); i++ {
		var name string
		field := s.Field(i)
		typeField := t.Field(i)
		if !field.CanInterface() {
			// Unexported field: skip.
			continue
		}
		urlTag := typeField.Tag.Get("url")
		if urlTag == "-" {
			continue
		}
		name, opts := parseTag(urlTag)
		var omitEmpty, squash bool
		omitEmpty = opts.Contains("omitempty")
		squash = opts.Contains("squash")
		if squash {
			// Recurse into the nested/embedded struct's fields.
			err := paramParseStruct(v, field.Interface())
			if err != nil {
				return err
			}
			continue
		}
		if urlTag == "" {
			// No tag: fall back to the lowercased field name.
			name = strings.ToLower(typeField.Name)
		}
		// Values are rendered with %v; omitempty only drops values whose
		// rendered form is empty.
		if val := fmt.Sprintf("%v", field.Interface()); !(omitEmpty && len(val) == 0) {
			v.Add(name, val)
		}
	}
	return nil
}
func prepareRequestBody(b interface{}) (io.Reader, error) {
switch b.(type) {
case string:
// treat is as text
return strings.NewReader(b.(string)), nil
case io.Reader:
// treat is as text
return b.(io.Reader), nil
case []byte:
//treat as byte array
return bytes.NewReader(b.([]byte)), nil
case nil:
return nil, nil
default:
// try to jsonify it
j, err := json.Marshal(b)
if err == nil {
return bytes.NewReader(j), nil
}
return nil, err
}
}
// DefaultDialer is the dialer behind DefaultTransport; its Timeout is
// adjusted by SetConnectTimeout.
var DefaultDialer = &net.Dialer{Timeout: 1000 * time.Millisecond}

// DefaultTransport and DefaultClient perform every request that does
// not specify a proxy.
var DefaultTransport http.RoundTripper = &http.Transport{Dial: DefaultDialer.Dial, Proxy: http.ProxyFromEnvironment}
var DefaultClient = &http.Client{Transport: DefaultTransport}

// proxyTransport and proxyClient are lazily (re)built by Do when a
// Proxy is set. NOTE(review): package-level mutable state shared by
// all requests — not goroutine-safe; verify before concurrent use.
var proxyTransport http.RoundTripper
var proxyClient *http.Client
// SetConnectTimeout changes the dial timeout used by the package-wide
// DefaultDialer, and therefore by every subsequent request.
func SetConnectTimeout(duration time.Duration) {
	DefaultDialer.Timeout = duration
}
// AddHeader queues an extra header to be added when the request is
// built. Headers accumulate: adding the same name twice sends both
// values.
func (r *Request) AddHeader(name string, value string) {
	if r.headers == nil {
		r.headers = make([]headerTuple, 0)
	}
	tuple := headerTuple{name: name, value: value}
	r.headers = append(r.headers, tuple)
}
// WithHeader returns a copy of the request with the header queued,
// enabling chained call syntax:
// goreq.Request{...}.WithHeader("X-Custom", "v").Do()
func (r Request) WithHeader(name string, value string) Request {
	r.AddHeader(name, value)
	return r
}
// AddCookie queues c to be attached to the request when it is built.
func (r *Request) AddCookie(c *http.Cookie) {
	r.cookies = append(r.cookies, c)
}
// WithCookie returns a copy of the request with the cookie queued,
// enabling chained call syntax.
func (r Request) WithCookie(c *http.Cookie) Request {
	r.AddCookie(c)
	return r
}
// Do performs the request and returns the Response. On failure the
// error is always a *goreq.Error whose Timeout method tells timeouts
// apart from other failures. When MaxRedirects is exceeded, a non-nil
// Response may be returned alongside the error (or with a nil error
// when MaxRedirects was left at its zero value).
//
// NOTE(review): Do mutates shared state — it assigns CheckRedirect
// (and, when Timeout > 0, Timeout) on the package-level DefaultClient
// and rebuilds the package-level proxyTransport/proxyClient. Concurrent
// calls with differing settings may interfere; verify before relying on
// this in parallel code.
func (r Request) Do() (*Response, error) {
	var client = DefaultClient
	var transport = DefaultTransport
	var resUri string
	var redirectFailed bool
	r.Method = valueOrDefault(r.Method, "GET")
	// use a client with a cookie jar if necessary. We create a new client not
	// to modify the default one.
	if r.CookieJar != nil {
		client = &http.Client{
			Transport: transport,
			Jar:       r.CookieJar,
		}
	}
	if r.Proxy != "" {
		proxyUrl, err := url.Parse(r.Proxy)
		if err != nil {
			// proxy address is in a wrong format
			return nil, &Error{Err: err}
		}
		//If jar is specified new client needs to be built
		if proxyTransport == nil || client.Jar != nil {
			proxyTransport = &http.Transport{Dial: DefaultDialer.Dial, Proxy: http.ProxyURL(proxyUrl)}
			proxyClient = &http.Client{Transport: proxyTransport, Jar: client.Jar}
		} else if proxyTransport, ok := proxyTransport.(*http.Transport); ok {
			// reuse the cached proxy transport, only swapping the proxy URL
			proxyTransport.Proxy = http.ProxyURL(proxyUrl)
		}
		transport = proxyTransport
		client = proxyClient
	}
	client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		if len(via) > r.MaxRedirects {
			redirectFailed = true
			return errors.New("Error redirecting. MaxRedirects reached")
		}
		resUri = req.URL.String()
		//By default Golang will not redirect request headers
		// https://code.google.com/p/go/issues/detail?id=4800&q=request%20header
		if r.RedirectHeaders {
			for key, val := range via[0].Header {
				req.Header[key] = val
			}
		}
		return nil
	}
	if transport, ok := transport.(*http.Transport); ok {
		if r.Insecure {
			if transport.TLSClientConfig != nil {
				transport.TLSClientConfig.InsecureSkipVerify = true
			} else {
				transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
			}
		} else if transport.TLSClientConfig != nil {
			// the default TLS client (when transport.TLSClientConfig==nil) is
			// already set to verify, so do nothing in that case
			transport.TLSClientConfig.InsecureSkipVerify = false
		}
	}
	req, err := r.NewRequest()
	if err != nil {
		// we couldn't parse the URL.
		return nil, &Error{Err: err}
	}
	timeout := false
	if r.Timeout > 0 {
		client.Timeout = r.Timeout
	}
	if r.ShowDebug {
		dump, err := httputil.DumpRequest(req, true)
		if err != nil {
			log.Println(err)
		}
		log.Println(string(dump))
	}
	if r.OnBeforeRequest != nil {
		r.OnBeforeRequest(&r, req)
	}
	res, err := client.Do(req)
	if err != nil {
		if !timeout {
			// classify the failure: the error itself, or the one wrapped
			// inside a *url.Error, may report a timeout
			if t, ok := err.(itimeout); ok {
				timeout = t.Timeout()
			}
			if ue, ok := err.(*url.Error); ok {
				if t, ok := ue.Err.(itimeout); ok {
					timeout = t.Timeout()
				}
			}
		}
		var response *Response
		//If redirect fails we still want to return response data
		if redirectFailed {
			if res != nil {
				response = &Response{res, resUri, &Body{reader: res.Body}, req}
			} else {
				response = &Response{res, resUri, nil, req}
			}
		}
		//If redirect fails and we haven't set a redirect count we shouldn't return an error
		if redirectFailed && r.MaxRedirects == 0 {
			return response, nil
		}
		return response, &Error{timeout: timeout, Err: err}
	}
	// transparently decompress the body when the server honoured the
	// requested Content-Encoding
	if r.Compression != nil && strings.Contains(res.Header.Get("Content-Encoding"), r.Compression.ContentEncoding) {
		compressedReader, err := r.Compression.reader(res.Body)
		if err != nil {
			return nil, &Error{Err: err}
		}
		return &Response{res, resUri, &Body{reader: res.Body, compressedReader: compressedReader}, req}, nil
	}
	return &Response{res, resUri, &Body{reader: res.Body}, req}, nil
}
// addHeaders copies the convenience header fields (User-Agent, Accept,
// Content-Type) into headersMap when they are set.
func (r Request) addHeaders(headersMap http.Header) {
	if r.UserAgent != "" {
		headersMap.Add("User-Agent", r.UserAgent)
	}
	if r.Accept != "" {
		headersMap.Add("Accept", r.Accept)
	}
	if r.ContentType != "" {
		headersMap.Add("Content-Type", r.ContentType)
	}
}
// NewRequest builds the underlying *http.Request without sending it:
// body preparation (with optional compression), query-string encoding,
// headers, basic auth and cookies are all applied here. It can be used
// to obtain the raw request for purposes other than Do.
func (r Request) NewRequest() (*http.Request, error) {
	b, e := prepareRequestBody(r.Body)
	if e != nil {
		// there was a problem marshaling the body
		return nil, &Error{Err: e}
	}
	if r.QueryString != nil {
		param, e := paramParse(r.QueryString)
		if e != nil {
			return nil, &Error{Err: e}
		}
		// NOTE(review): appended blindly with "?" — a Uri that already
		// carries a query string would end up with two "?" separators.
		r.Uri = r.Uri + "?" + param
	}
	var bodyReader io.Reader
	if b != nil && r.Compression != nil {
		// compress the whole body into an in-memory buffer first
		buffer := bytes.NewBuffer([]byte{})
		readBuffer := bufio.NewReader(b)
		writer, err := r.Compression.writer(buffer)
		if err != nil {
			return nil, &Error{Err: err}
		}
		_, e = readBuffer.WriteTo(writer)
		writer.Close()
		if e != nil {
			return nil, &Error{Err: e}
		}
		bodyReader = buffer
	} else {
		bodyReader = b
	}
	req, err := http.NewRequest(r.Method, r.Uri, bodyReader)
	if err != nil {
		return nil, err
	}
	// add headers to the request
	req.Host = r.Host
	r.addHeaders(req.Header)
	if r.Compression != nil {
		req.Header.Add("Content-Encoding", r.Compression.ContentEncoding)
		req.Header.Add("Accept-Encoding", r.Compression.ContentEncoding)
	}
	if r.headers != nil {
		for _, header := range r.headers {
			req.Header.Add(header.name, header.value)
		}
	}
	//use basic auth if required
	if r.BasicAuthUsername != "" {
		req.SetBasicAuth(r.BasicAuthUsername, r.BasicAuthPassword)
	}
	for _, c := range r.cookies {
		req.AddCookie(c)
	}
	return req, nil
}
// valueOrDefault returns value when it is non-empty, and def otherwise.
func valueOrDefault(value, def string) string {
	if value == "" {
		return def
	}
	return value
}

64
vendor/github.com/franela/goreq/tags.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found here: https://github.com/golang/go/blob/master/LICENSE
package goreq
import (
"strings"
"unicode"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	idx := strings.Index(tag, ",")
	if idx < 0 {
		return tag, tagOptions("")
	}
	return tag[:idx], tagOptions(tag[idx+1:])
}

// Contains reports whether the comma-separated option list includes
// optionName as a complete, comma-bounded entry.
func (o tagOptions) Contains(optionName string) bool {
	remaining := string(o)
	for len(remaining) > 0 {
		option := remaining
		if comma := strings.Index(remaining, ","); comma >= 0 {
			option, remaining = remaining[:comma], remaining[comma+1:]
		} else {
			remaining = ""
		}
		if option == optionName {
			return true
		}
	}
	return false
}
// isValidTag reports whether s is usable as a tag name: non-empty and
// made only of letters, digits, or the allowed punctuation characters.
// Backslash and quote characters are reserved and therefore rejected.
func isValidTag(s string) bool {
	if len(s) == 0 {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, c := range s {
		if strings.ContainsRune(allowedPunct, c) {
			continue
		}
		if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
			return false
		}
	}
	return true
}

View File

@ -0,0 +1,10 @@
# How to contribute
Your contributions are more than welcome at OpsGenie! You can contribute to the OpsGenie Go SDK by submitting a pull request.
Before pushing your commits, please make sure you followed the steps described below:
1. Run `go fmt` to format your code.
2. Alternatively you can use [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports), it also formats import lines.
3. [golint](https://github.com/golang/lint) your code to detect style mistakes.
4. [govet](http://godoc.org/golang.org/x/tools/cmd/vet) your code to detect suspicious constructs.

202
vendor/github.com/opsgenie/opsgenie-go-sdk/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 OpsGenie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

5
vendor/github.com/opsgenie/opsgenie-go-sdk/NOTICE generated vendored Normal file
View File

@ -0,0 +1,5 @@
OpsGenie Go SDK
Copyright 2015 OpsGenie
This product includes software developed at
OpsGenie (http://www.opsgenie.com/).

97
vendor/github.com/opsgenie/opsgenie-go-sdk/README.md generated vendored Normal file
View File

@ -0,0 +1,97 @@
# opsgenie-go-sdk
## Aim and Scope
OpsGenie GO SDK aims to access OpsGenie Web API through HTTP calls
from a client application purely written in Go language.
OpsGenie Go SDK covers *the Alert API*, *the Heartbeat API*,
*the Integration API* and *the Policy API*. Future releases
are subject to be delivered for packing more APIs soon.
**Documentation:** [![](https://godoc.org/github.com/nathany/looper?status.svg)](http://godoc.org/github.com/opsgenie/opsgenie-go-sdk/client)
For more information about OpsGenie Go SDK, please refer to [OpsGenie Go API](https://www.opsgenie.com/docs/api-and-client-libraries/opsgenie-go-api) document.
## Pre-requisites
* The API is built using Go 1.4.2. Some features may not be
available or supported unless you have installed a relevant version of Go.
Please click [https://golang.org/dl/](https://golang.org/dl/) to download and
get more information about installing Go on your computer.
* Make sure you have properly set both `GOROOT` and `GOPATH`
environment variables.
* Before you can begin, you need to sign up [OpsGenie](http://www.opsgenie.com) if you
don't have a valid account yet. Create an API Integration and get your API key.
## Installation
To download all packages in the repo with their dependencies, simply run
`go get github.com/opsgenie/opsgenie-go-sdk/...`
## Getting Started
One can start using OpsGenie Go SDK by initializing client and making a request. Example shown below demonstrates how to initialize an OpsGenie Alert client and make a create alert request.
```
package main
import (
"fmt"
alerts "github.com/opsgenie/opsgenie-go-sdk/alerts"
ogcli "github.com/opsgenie/opsgenie-go-sdk/client"
)
func main() {
cli := new(ogcli.OpsGenieClient)
cli.SetAPIKey("YOUR_API_KEY")
alertCli, cliErr := cli.Alert()
if cliErr != nil {
panic(cliErr)
}
// create the alert
req := alerts.CreateAlertRequest{Message: "Hello from OpsGenie Go Sdk"}
response, alertErr := alertCli.Create(req)
if alertErr != nil {
panic(alertErr)
}
fmt.Printf("message: %s\n", response.Message)
fmt.Printf("alert id: %s\n", response.AlertId)
fmt.Printf("status: %s\n", response.Status)
fmt.Printf("code: %d\n", response.Code)
}
```
There are many useful sample code snippets under `samples` directory for packages.
## Handling Zero value problem with 'omitempty' option in Json
Every golang type has a [zero value](http://golang.org/ref/spec#The_zero_value).
AddHeartbeat and UpdateHeartbeat requests have a boolean "Enabled" field to determine a heartbeat is enabled or disabled.
Enabled is not a mandatory field in either request, so it carries the "omitempty" flag.
When the requests are marshalled to JSON, any field holding its type's zero value is omitted when this option is set.
The problem starts here:
When you want to set the Enabled field to false, it is not marshalled, because false is the boolean zero value and the field has the omitempty option.
The same problem occurs with strings, for example when you want to reset a heartbeat's description. To set a heartbeat's description to the empty string, you would send the request with Description: "".
But that value is not marshalled either.
So, to solve this we followed go-github's solution as mentioned [here](https://willnorris.com/2014/05/go-rest-apis-and-pointers).
We used pointers just for booleans, if you want to set a string's value to empty. please use " ", or "-" as new string value.
## The Web API
Please follow the links below for more information and details
about the Web API.
* [Alert API](https://www.opsgenie.com/docs/web-api/alert-api)
* [Heartbeat API](https://www.opsgenie.com/docs/web-api/heartbeat-api)
* [Integration API](https://www.opsgenie.com/docs/web-api/integration-api)
* [Policy API](https://www.opsgenie.com/docs/web-api/policy-api)
## Bug Reporting and Feature Requests
If you would like to report a bug or request a feature, please open an issue.

View File

@ -0,0 +1,221 @@
package alerts
// CreateAlertResponse holds the result data of the CreateAlertRequest.
type CreateAlertResponse struct {
	Message string `json:"message"`
	// AlertID is the server-assigned identifier of the newly created alert.
	AlertID string `json:"alertId"`
	Status  string `json:"status"`
	Code    int    `json:"code"`
}

// CountAlertResponse holds the result data of the CountAlertRequest.
type CountAlertResponse struct {
	// Count is the number of alerts matching the request's filters.
	Count int `json:"count"`
}

// CloseAlertResponse holds the result data of the CloseAlertRequest.
type CloseAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// DeleteAlertResponse holds the result data of the DeleteAlertRequest.
type DeleteAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}
// ListAlertsResponse holds the result data of the ListAlertsRequest.
type ListAlertsResponse struct {
	// Alerts carries one summary entry per matching alert.
	Alerts []struct {
		ID           string `json:"id"`
		Alias        string `json:"alias"`
		Message      string `json:"message"`
		Status       string `json:"status"`
		IsSeen       bool   `json:"isSeen"`
		Acknowledged bool   `json:"acknowledged"`
		CreatedAt    uint64 `json:"createdAt"`
		UpdatedAt    uint64 `json:"updatedAt"`
		TinyID       string `json:"tinyId"`
		Owner        string `json:"owner"`
	} `json:"alerts"`
}

// ListAlertNotesResponse holds the result data of the ListAlertNotesRequest.
type ListAlertNotesResponse struct {
	Took int `json:"took"`
	// LastKey mirrors the request field of the same name; presumably a
	// paging cursor for a follow-up request — TODO confirm against the API.
	LastKey string `json:"lastKey"`
	Notes   []struct {
		Note      string `json:"note"`
		Owner     string `json:"owner"`
		CreatedAt uint64 `json:"createdAt"`
	} `json:"notes"`
}

// ListAlertLogsResponse holds the result data of the ListAlertLogsRequest.
type ListAlertLogsResponse struct {
	LastKey string `json:"lastKey"`
	Logs    []struct {
		Log       string `json:"log"`
		LogType   string `json:"logType"`
		Owner     string `json:"owner"`
		CreatedAt uint64 `json:"createdAt"`
	} `json:"logs"`
}

// ListAlertRecipientsResponse holds the result data of the ListAlertRecipientsRequest.
type ListAlertRecipientsResponse struct {
	// Users lists individual recipients with their notification state.
	Users []struct {
		Username       string `json:"username"`
		State          string `json:"state"`
		Method         string `json:"method"`
		StateChangedAt uint64 `json:"stateChangedAt"`
	} `json:"users"`
	// Groups maps a group name to that group's member recipients.
	Groups map[string][]struct {
		Username       string `json:"username"`
		State          string `json:"state"`
		Method         string `json:"method"`
		StateChangedAt uint64 `json:"stateChangedAt"`
	} `json:"groups"`
}
// AcknowledgeAlertResponse holds the result data of the AcknowledgeAlertRequest.
// Like the other action responses below it carries only the API's status
// string and numeric result code.
type AcknowledgeAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// RenotifyAlertResponse holds the result data of the RenotifyAlertRequest.
type RenotifyAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// TakeOwnershipAlertResponse holds the result data of the TakeOwnershipAlertRequest.
type TakeOwnershipAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// AssignOwnerAlertResponse holds the result data of the AssignOwnerAlertRequest.
type AssignOwnerAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// AddTeamAlertResponse holds the result data of the AddTeamAlertRequest.
type AddTeamAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// AddRecipientAlertResponse holds the result data of the AddRecipientAlertRequest.
type AddRecipientAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// AddNoteAlertResponse holds the result data of the AddNoteAlertRequest.
type AddNoteAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// AddTagsAlertResponse holds the result data of the AddTagsAlertRequest.
type AddTagsAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// ExecuteActionAlertResponse holds the result data of the ExecuteActionAlertRequest.
// Result carries the action's textual outcome instead of a status string.
type ExecuteActionAlertResponse struct {
	Result string `json:"result"`
	Code   int    `json:"code"`
}

// AttachFileAlertResponse holds the result data of the AttachFileAlertRequest.
type AttachFileAlertResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}
// GetAlertResponse holds the result data of the GetAlertRequest.
type GetAlertResponse struct {
	Tags         []string               `json:"tags"`
	Count        int                    `json:"count"`
	Status       string                 `json:"status"`
	Teams        []string               `json:"teams"`
	Recipients   []string               `json:"recipients"`
	TinyID       string                 `json:"tinyId"`
	Alias        string                 `json:"alias"`
	Entity       string                 `json:"entity"`
	ID           string                 `json:"id"`
	UpdatedAt    uint64                 `json:"updatedAt"`
	Message      string                 `json:"message"`
	Details      map[string]string      `json:"details"`
	Source       string                 `json:"source"`
	Description  string                 `json:"description"`
	CreatedAt    uint64                 `json:"createdAt"`
	IsSeen       bool                   `json:"isSeen"`
	Acknowledged bool                   `json:"acknowledged"`
	Owner        string                 `json:"owner"`
	Actions      []string               `json:"actions"`
	// SystemData is a free-form bag; the accessors below pull typed values
	// out of it. Note that when it is populated by JSON decoding, numbers
	// arrive as float64 (encoding/json's default number type).
	SystemData map[string]interface{} `json:"systemData"`
}

// IntegrationType returns the extracted "integrationType" entry from the
// alert's SystemData property, or "" when absent or not a string.
func (res *GetAlertResponse) IntegrationType() string {
	if val, ok := res.SystemData["integrationType"].(string); ok {
		return val
	}
	return ""
}

// IntegrationID returns the extracted "integrationId" entry from the
// alert's SystemData property, or "" when absent or not a string.
func (res *GetAlertResponse) IntegrationID() string {
	if val, ok := res.SystemData["integrationId"].(string); ok {
		return val
	}
	return ""
}

// IntegrationName returns the extracted "integrationName" entry from the
// alert's SystemData property, or "" when absent or not a string.
func (res *GetAlertResponse) IntegrationName() string {
	if val, ok := res.SystemData["integrationName"].(string); ok {
		return val
	}
	return ""
}

// AckTime returns the extracted "ackTime" entry from the alert's SystemData
// property, or 0 when absent. JSON decoding stores numbers as float64, so
// the original uint64-only assertion always failed for decoded responses;
// both representations are accepted now.
func (res *GetAlertResponse) AckTime() uint64 {
	switch val := res.SystemData["ackTime"].(type) {
	case uint64:
		return val
	case float64:
		return uint64(val)
	}
	return 0
}

// AcknowledgedBy returns the extracted "acknowledgedBy" entry from the
// alert's SystemData property, or "" when absent or not a string.
func (res *GetAlertResponse) AcknowledgedBy() string {
	if val, ok := res.SystemData["acknowledgedBy"].(string); ok {
		return val
	}
	return ""
}

// CloseTime returns the extracted "closeTime" entry from the alert's
// SystemData property, or 0 when absent. Accepts uint64 and the float64
// produced by JSON decoding (see AckTime).
func (res *GetAlertResponse) CloseTime() uint64 {
	switch val := res.SystemData["closeTime"].(type) {
	case uint64:
		return val
	case float64:
		return uint64(val)
	}
	return 0
}

// ClosedBy returns the extracted "closedBy" entry from the alert's
// SystemData property, or "" when absent or not a string.
func (res *GetAlertResponse) ClosedBy() string {
	if val, ok := res.SystemData["closedBy"].(string); ok {
		return val
	}
	return ""
}

View File

@ -0,0 +1,218 @@
/*
Copyright 2015 OpsGenie. All rights reserved.
Use of this source code is governed by an Apache Software
license that can be found in the LICENSE file.
*/
//Package alerts provides requests and response structures to achieve Alert API actions.
package alerts
import (
"os"
)
// AcknowledgeAlertRequest provides necessary parameter structure to Acknowledge an alert at OpsGenie.
// Fields are JSON-encoded into the request body; APIKey is filled in by the
// client, and ID or Alias selects the target alert.
type AcknowledgeAlertRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Alias  string `json:"alias,omitempty"`
	User   string `json:"user,omitempty"`
	Note   string `json:"note,omitempty"`
	Source string `json:"source,omitempty"`
}

// AddNoteAlertRequest provides necessary parameter structure to Add Note to an alert at OpsGenie.
type AddNoteAlertRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Alias  string `json:"alias,omitempty"`
	Note   string `json:"note,omitempty"`
	User   string `json:"user,omitempty"`
	Source string `json:"source,omitempty"`
}

// AddRecipientAlertRequest provides necessary parameter structure to Add Recipient to an alert at OpsGenie.
type AddRecipientAlertRequest struct {
	APIKey    string `json:"apiKey,omitempty"`
	ID        string `json:"id,omitempty"`
	Alias     string `json:"alias,omitempty"`
	Recipient string `json:"recipient,omitempty"`
	User      string `json:"user,omitempty"`
	Note      string `json:"note,omitempty"`
	Source    string `json:"source,omitempty"`
}

// AddTeamAlertRequest provides necessary parameter structure to Add Team to an alert at OpsGenie.
type AddTeamAlertRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Alias  string `json:"alias,omitempty"`
	Team   string `json:"team,omitempty"`
	User   string `json:"user,omitempty"`
	Note   string `json:"note,omitempty"`
	Source string `json:"source,omitempty"`
}

// AddTagsAlertRequest provides necessary parameter structure to Add Tags to an alert at OpsGenie.
type AddTagsAlertRequest struct {
	APIKey string   `json:"apiKey,omitempty"`
	ID     string   `json:"id,omitempty"`
	Alias  string   `json:"alias,omitempty"`
	Tags   []string `json:"tags,omitempty"`
	User   string   `json:"user,omitempty"`
	Note   string   `json:"note,omitempty"`
	Source string   `json:"source,omitempty"`
}

// AssignOwnerAlertRequest provides necessary parameter structure to Assign a User as Owner to an alert at OpsGenie.
type AssignOwnerAlertRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Alias  string `json:"alias,omitempty"`
	Owner  string `json:"owner,omitempty"`
	User   string `json:"user,omitempty"`
	Note   string `json:"note,omitempty"`
	Source string `json:"source,omitempty"`
}

// AttachFileAlertRequest provides necessary parameter structure to Attach File to an alert at OpsGenie.
type AttachFileAlertRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Alias  string `json:"alias,omitempty"`
	// Attachment is an open file handle; presumably uploaded as multipart
	// content rather than plain JSON — TODO confirm against the client.
	Attachment *os.File `json:"attachment,omitempty"`
	User       string   `json:"user,omitempty"`
	Source     string   `json:"source,omitempty"`
	IndexFile  string   `json:"indexFile,omitempty"`
	Note       string   `json:"note,omitempty"`
}

// CloseAlertRequest provides necessary parameter structure to Close an alert at OpsGenie.
type CloseAlertRequest struct {
	APIKey string   `json:"apiKey,omitempty"`
	ID     string   `json:"id,omitempty"`
	Alias  string   `json:"alias,omitempty"`
	User   string   `json:"user,omitempty"`
	Note   string   `json:"note,omitempty"`
	Notify []string `json:"notify,omitempty"`
	Source string   `json:"source,omitempty"`
}

// CreateAlertRequest provides necessary parameter structure to Create an alert at OpsGenie.
type CreateAlertRequest struct {
	APIKey      string            `json:"apiKey,omitempty"`
	Message     string            `json:"message,omitempty"`
	Teams       []string          `json:"teams,omitempty"`
	Alias       string            `json:"alias,omitempty"`
	Description string            `json:"description,omitempty"`
	Recipients  []string          `json:"recipients,omitempty"`
	Actions     []string          `json:"actions,omitempty"`
	Source      string            `json:"source,omitempty"`
	Tags        []string          `json:"tags,omitempty"`
	Details     map[string]string `json:"details,omitempty"`
	Entity      string            `json:"entity,omitempty"`
	User        string            `json:"user,omitempty"`
	Note        string            `json:"note,omitempty"`
}
// DeleteAlertRequest provides necessary parameter structure to Delete an alert from OpsGenie.
// These request types use `url` tags (not `json`) because they are encoded
// into the query string of GET/DELETE requests by go-querystring.
type DeleteAlertRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	ID     string `url:"id,omitempty"`
	Alias  string `url:"alias,omitempty"`
	User   string `url:"user,omitempty"`
	Source string `url:"source,omitempty"`
}

// ExecuteActionAlertRequest provides necessary parameter structure to Execute Custom Actions on an alert at OpsGenie.
type ExecuteActionAlertRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Alias  string `json:"alias,omitempty"`
	Action string `json:"action,omitempty"`
	User   string `json:"user,omitempty"`
	Source string `json:"source,omitempty"`
	Note   string `json:"note,omitempty"`
}

// GetAlertRequest provides necessary parameter structure to Retrieve an alert details from OpsGenie.
type GetAlertRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	ID     string `url:"id,omitempty"`
	Alias  string `url:"alias,omitempty"`
	TinyID string `url:"tinyId,omitempty"`
}

// ListAlertLogsRequest provides necessary parameter structure to Retrieve activity logs of an alert from OpsGenie.
type ListAlertLogsRequest struct {
	APIKey  string `url:"apiKey,omitempty"`
	ID      string `url:"id,omitempty"`
	Alias   string `url:"alias,omitempty"`
	Limit   uint64 `url:"limit,omitempty"`
	Order   string `url:"order,omitempty"`
	LastKey string `url:"lastKey,omitempty"`
}

// ListAlertNotesRequest provides necessary parameter structure to Retrieve notes of an alert from OpsGenie.
type ListAlertNotesRequest struct {
	APIKey  string `url:"apiKey,omitempty"`
	ID      string `url:"id,omitempty"`
	Alias   string `url:"alias,omitempty"`
	Limit   uint64 `url:"limit,omitempty"`
	Order   string `url:"order,omitempty"`
	LastKey string `url:"lastKey,omitempty"`
}

// ListAlertRecipientsRequest provides necessary parameter structure to Retrieve recipients of an alert from OpsGenie.
type ListAlertRecipientsRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	ID     string `url:"id,omitempty"`
	Alias  string `url:"alias,omitempty"`
}

// ListAlertsRequest provides necessary parameter structure to Retrieve alerts from OpsGenie.
type ListAlertsRequest struct {
	APIKey        string `url:"apiKey,omitempty"`
	CreatedAfter  uint64 `url:"createdAfter,omitempty"`
	CreatedBefore uint64 `url:"createdBefore,omitempty"`
	UpdatedAfter  uint64 `url:"updatedAfter,omitempty"`
	UpdatedBefore uint64 `url:"updatedBefore,omitempty"`
	Limit         uint64 `url:"limit,omitempty"`
	Status        string `url:"status,omitempty"`
	SortBy        string `url:"sortBy,omitempty"`
	Order         string `url:"order,omitempty"`
}
// CountAlertRequest counts the alerts at OpsGenie.
type CountAlertRequest struct {
APIKey string `url:"apiKey,omitempty"`
CreatedAfter uint64 `url:"createdAfter,omitempty"`
CreatedBefore uint64 `url:"createdBefore,omitempty"`
UpdatedAfter uint64 `url:"updatedAfter,omitempty"`
UpdatedBefore uint64 `url:"updatedBefore,omitempty"`
Limit uint64 `url:"limit,omitempty"`
Status string `url:"status,omitempty"`
Tags []string `json:"tags,omitempty"`
TagsOperator string `url:"tagsoperator,omitempty"`
}
// RenotifyAlertRequest provides necessary parameter structure to Re-notify recipients at OpsGenie.
// Fields are JSON-encoded into the request body; ID or Alias selects the alert.
type RenotifyAlertRequest struct {
	APIKey     string   `json:"apiKey,omitempty"`
	ID         string   `json:"id,omitempty"`
	Alias      string   `json:"alias,omitempty"`
	Recipients []string `json:"recipients,omitempty"`
	User       string   `json:"user,omitempty"`
	Note       string   `json:"note,omitempty"`
	Source     string   `json:"source,omitempty"`
}

// TakeOwnershipAlertRequest provides necessary parameter structure to Become the Owner of an alert at OpsGenie.
type TakeOwnershipAlertRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Alias  string `json:"alias,omitempty"`
	User   string `json:"user,omitempty"`
	Note   string `json:"note,omitempty"`
	Source string `json:"source,omitempty"`
}

View File

@ -0,0 +1,365 @@
/*
Copyright 2015 OpsGenie. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package client provides clients for using the OpsGenie Web API. Also prepares and sends requests.
//API user first creates a OpsGenieClient instance.
//
//cli := new(ogcli.OpsGenieClient)
//
//Following that he/she can set APIKey and some configurations for HTTP communication layer by setting
//a proxy definition and/or transport layer options.
//
//cli.SetAPIKey(constants.APIKey)
//
//Then create the client of the API type that he/she wants to use.
//
//alertCli, cliErr := cli.Alert()
//
//if cliErr != nil {
//panic(cliErr)
//}
//
//The most fundamental and general use case is being able to access the
//OpsGenie Web API by coding a Go program.
//The program -by mean of a client application- can send OpsGenie Web API
//the requests using the 'client' package in a higher level. For the programmer
//of the client application, that reduces the number of LoCs.
//Besides it will result a less error-prone application and reduce
//the complexity by hiding the low-level networking, error-handling and
//byte-processing calls.
package client
import (
"encoding/json"
"errors"
"fmt"
"runtime"
"time"
"github.com/franela/goreq"
goquery "github.com/google/go-querystring/query"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
// endpointURL is the base URL of OpsGenie Web API. It can be overridden
// per client via SetOpsGenieAPIUrl (e.g. for testing).
var endpointURL = "https://api.opsgenie.com"

const (
	// defaultConnectionTimeout bounds how long establishing a connection may take.
	defaultConnectionTimeout time.Duration = 30 * time.Second
	// defaultRequestTimeout bounds the total time for a single request/response.
	defaultRequestTimeout time.Duration = 60 * time.Second
	// defaultMaxRetryAttempts is how many times sendRequest retries
	// transport errors and 5xx responses.
	defaultMaxRetryAttempts int = 5
	// timeSleepBetweenRequests is the base retry delay; sendRequest scales
	// it linearly with the attempt number.
	timeSleepBetweenRequests time.Duration = 500 * time.Millisecond
)
// requestHeaderUserAgent holds the components of the User-Agent header,
// rendered as sdkName/version (os;goVersion;timezone).
type requestHeaderUserAgent struct {
	sdkName   string
	version   string
	os        string
	goVersion string
	timezone  string
}

// ToString renders the User-Agent header value from the struct's fields.
func (ua requestHeaderUserAgent) ToString() string {
	return ua.sdkName + "/" + ua.version + " (" + ua.os + ";" + ua.goVersion + ";" + ua.timezone + ")"
}

// userAgentParam is the process-wide User-Agent value, populated once in init.
var userAgentParam requestHeaderUserAgent
/*
OpsGenieClient is a general data type used for:
- authenticating callers through their API keys and
- instantiating "alert", "heartbeat", "integration" and "policy" clients
- setting HTTP transport layer configurations
- setting Proxy configurations
*/
type OpsGenieClient struct {
	// proxy, when non-nil, is rendered into a proxy URI for every request.
	proxy *ProxyConfiguration
	// httpTransportSettings holds timeouts and the retry budget; populated
	// lazily with defaults by makeHTTPTransportSettings.
	httpTransportSettings *HTTPTransportSettings
	// apiKey authenticates requests; the API clients copy it into each
	// request payload.
	apiKey string
	// opsGenieAPIURL is the base endpoint; empty means "use endpointURL".
	opsGenieAPIURL string
}
// SetProxyConfiguration sets proxy configurations of the OpsGenieClient.
func (cli *OpsGenieClient) SetProxyConfiguration(conf *ProxyConfiguration) {
	cli.proxy = conf
}

// SetHTTPTransportSettings sets HTTP transport layer configurations of the
// OpsGenieClient. Zero or negative fields are replaced with package
// defaults the next time a client is instantiated.
func (cli *OpsGenieClient) SetHTTPTransportSettings(settings *HTTPTransportSettings) {
	cli.httpTransportSettings = settings
}

// SetAPIKey sets API Key of the OpsGenieClient and authenticates callers through the API Key at OpsGenie.
func (cli *OpsGenieClient) SetAPIKey(key string) {
	cli.apiKey = key
}

// SetOpsGenieAPIUrl sets the endpoint (base URL) that requests will be sent
// to. It can be used for testing purposes; an empty string is ignored.
func (cli *OpsGenieClient) SetOpsGenieAPIUrl(url string) {
	if url != "" {
		cli.opsGenieAPIURL = url
	}
}

// OpsGenieAPIUrl returns the current endpoint (base URL) that requests will
// be sent to, lazily defaulting to the production endpointURL.
func (cli *OpsGenieClient) OpsGenieAPIUrl() string {
	if cli.opsGenieAPIURL == "" {
		cli.opsGenieAPIURL = endpointURL
	}
	return cli.opsGenieAPIURL
}

// APIKey returns the API Key value that OpsGenieClient uses to authenticate at OpsGenie.
func (cli *OpsGenieClient) APIKey() string {
	return cli.apiKey
}
// makeHTTPTransportSettings ensures the client carries a complete set of
// HTTP transport settings, substituting package defaults for anything
// unset or non-positive.
func (cli *OpsGenieClient) makeHTTPTransportSettings() {
	if cli.httpTransportSettings == nil {
		cli.httpTransportSettings = &HTTPTransportSettings{
			MaxRetryAttempts:  defaultMaxRetryAttempts,
			ConnectionTimeout: defaultConnectionTimeout,
			RequestTimeout:    defaultRequestTimeout,
		}
		return
	}
	settings := cli.httpTransportSettings
	if settings.MaxRetryAttempts <= 0 {
		settings.MaxRetryAttempts = defaultMaxRetryAttempts
	}
	if settings.ConnectionTimeout <= 0 {
		settings.ConnectionTimeout = defaultConnectionTimeout
	}
	if settings.RequestTimeout <= 0 {
		settings.RequestTimeout = defaultRequestTimeout
	}
}
// opsGenieAPIClient is the initialization surface shared by every
// API-specific client; each of them embeds OpsGenieClient (providing
// SetOpsGenieAPIUrl) and defines SetOpsGenieClient.
type opsGenieAPIClient interface {
	SetOpsGenieClient(OpsGenieClient)
	SetOpsGenieAPIUrl(string)
}

// setupClient performs the initialization previously duplicated across all
// eight factory methods: fill in default HTTP transport settings, copy this
// client's configuration into the API client, and apply the default
// endpoint when no custom one was configured.
func (cli *OpsGenieClient) setupClient(c opsGenieAPIClient) {
	cli.makeHTTPTransportSettings()
	c.SetOpsGenieClient(*cli)
	if cli.opsGenieAPIURL == "" {
		c.SetOpsGenieAPIUrl(endpointURL)
	}
}

// Alert instantiates a new OpsGenieAlertClient.
func (cli *OpsGenieClient) Alert() (*OpsGenieAlertClient, error) {
	alertClient := new(OpsGenieAlertClient)
	cli.setupClient(alertClient)
	return alertClient, nil
}

// Heartbeat instantiates a new OpsGenieHeartbeatClient.
func (cli *OpsGenieClient) Heartbeat() (*OpsGenieHeartbeatClient, error) {
	heartbeatClient := new(OpsGenieHeartbeatClient)
	cli.setupClient(heartbeatClient)
	return heartbeatClient, nil
}

// Integration instantiates a new OpsGenieIntegrationClient.
func (cli *OpsGenieClient) Integration() (*OpsGenieIntegrationClient, error) {
	integrationClient := new(OpsGenieIntegrationClient)
	cli.setupClient(integrationClient)
	return integrationClient, nil
}

// Policy instantiates a new OpsGeniePolicyClient.
func (cli *OpsGenieClient) Policy() (*OpsGeniePolicyClient, error) {
	policyClient := new(OpsGeniePolicyClient)
	cli.setupClient(policyClient)
	return policyClient, nil
}

// Team instantiates a new OpsGenieTeamClient.
func (cli *OpsGenieClient) Team() (*OpsGenieTeamClient, error) {
	teamClient := new(OpsGenieTeamClient)
	cli.setupClient(teamClient)
	return teamClient, nil
}

// Escalation instantiates a new OpsGenieEscalationClient.
func (cli *OpsGenieClient) Escalation() (*OpsGenieEscalationClient, error) {
	escalationClient := new(OpsGenieEscalationClient)
	cli.setupClient(escalationClient)
	return escalationClient, nil
}

// Schedule instantiates a new OpsGenieScheduleClient.
func (cli *OpsGenieClient) Schedule() (*OpsGenieScheduleClient, error) {
	scheduleClient := new(OpsGenieScheduleClient)
	cli.setupClient(scheduleClient)
	return scheduleClient, nil
}

// User instantiates a new OpsGenieUserClient.
func (cli *OpsGenieClient) User() (*OpsGenieUserClient, error) {
	userClient := new(OpsGenieUserClient)
	cli.setupClient(userClient)
	return userClient, nil
}
// buildCommonRequestProps is an internal method to set common properties of requests that will send to OpsGenie.
func (cli *OpsGenieClient) buildCommonRequestProps() goreq.Request {
	if cli.httpTransportSettings == nil {
		cli.makeHTTPTransportSettings()
	}
	// goreq's connect timeout is package-level state, so this setting
	// affects every goreq request in the process, not just this one.
	goreq.SetConnectTimeout(cli.httpTransportSettings.ConnectionTimeout)
	req := goreq.Request{}
	if cli.proxy != nil {
		req.Proxy = cli.proxy.toString()
	}
	req.UserAgent = userAgentParam.ToString()
	req.Timeout = cli.httpTransportSettings.RequestTimeout
	// NOTE(review): Insecure disables TLS certificate verification for all
	// API calls, exposing the API key to man-in-the-middle interception.
	// Confirm whether this is actually required before keeping it.
	req.Insecure = true
	return req
}
// buildGetRequest is an internal method to prepare a "GET" request that
// will be sent to OpsGenie. The request struct's `url`-tagged fields are
// encoded into the query string.
func (cli *OpsGenieClient) buildGetRequest(uri string, request interface{}) goreq.Request {
	req := cli.buildCommonRequestProps()
	req.Method = "GET"
	req.ContentType = "application/x-www-form-urlencoded; charset=UTF-8"
	uri = cli.OpsGenieAPIUrl() + uri
	if request != nil {
		// Encoding errors are deliberately ignored; goquery.Values returns
		// empty values for unsupported inputs.
		v, _ := goquery.Values(request)
		req.Uri = uri + "?" + v.Encode()
	} else {
		req.Uri = uri
	}
	// Log only the endpoint, never the query string: the encoded parameters
	// include the apiKey. The original message ended with a dangling
	// "with parameters: " that was never followed by anything.
	logging.Logger().Info("Executing OpsGenie request to [" + uri + "]")
	return req
}
// buildPostRequest is an internal method to prepare a "POST" request that will send to OpsGenie.
// The request value is attached as the body; goreq JSON-encodes it on send.
func (cli *OpsGenieClient) buildPostRequest(uri string, request interface{}) goreq.Request {
	req := cli.buildCommonRequestProps()
	req.Method = "POST"
	req.ContentType = "application/json; charset=utf-8"
	req.Uri = cli.OpsGenieAPIUrl() + uri
	req.Body = request
	// Marshalled here only for the log line; the marshal error is
	// deliberately ignored because logging is best-effort.
	// NOTE(review): the logged payload includes the apiKey field of the
	// request structs — consider redacting it.
	j, _ := json.Marshal(request)
	logging.Logger().Info("Executing OpsGenie request to ["+req.Uri+"] with content parameters: ", string(j))
	return req
}

// buildDeleteRequest is an internal method to prepare a "DELETE" request that will send to OpsGenie.
// Parameters travel in the query string, exactly as for GET requests.
func (cli *OpsGenieClient) buildDeleteRequest(uri string, request interface{}) goreq.Request {
	req := cli.buildGetRequest(uri, request)
	req.Method = "DELETE"
	return req
}
// sendRequest is an internal method to send the prepared requests to
// OpsGenie, retrying transport errors and 5xx responses with a linearly
// growing delay. On HTTP status >= 400 it consumes the body and returns a
// formatted error; otherwise the caller owns (and must close) the response.
func (cli *OpsGenieClient) sendRequest(req goreq.Request) (*goreq.Response, error) {
	// send the request
	var resp *goreq.Response
	var err error
	for i := 0; i < cli.httpTransportSettings.MaxRetryAttempts; i++ {
		resp, err = req.Do()
		if err == nil && resp.StatusCode < 500 {
			break
		}
		if resp != nil {
			// Close a failed attempt's body right away so its connection can
			// be reused; the original deferred all closes to function exit,
			// holding every retried body open. The final attempt's body is
			// kept open because the >= 400 path below still reads it.
			if i < cli.httpTransportSettings.MaxRetryAttempts-1 {
				resp.Body.Close()
			}
			logging.Logger().Info(fmt.Sprintf("Retrying request [%s] ResponseCode:[%d]. RetryCount: %d", req.Uri, resp.StatusCode, (i + 1)))
		} else {
			logging.Logger().Info(fmt.Sprintf("Retrying request [%s] Reason:[%s]. RetryCount: %d", req.Uri, err.Error(), (i + 1)))
		}
		time.Sleep(timeSleepBetweenRequests * time.Duration(i+1))
	}
	if err != nil {
		message := "Unable to send the request " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	// check for the returning http status
	statusCode := resp.StatusCode
	if statusCode >= 400 {
		// The error response is consumed here and never handed to the
		// caller, so close its body before returning (the original leaked
		// it on this path).
		defer resp.Body.Close()
		body, err := resp.Body.ToString()
		if err != nil {
			message := "Server response with error can not be parsed " + err.Error()
			logging.Logger().Warn(message)
			return nil, errors.New(message)
		}
		return nil, errorMessage(statusCode, body)
	}
	return resp, nil
}
// errorMessage maps an HTTP error status to a formatted, logged error.
// Status codes below 400 yield nil.
func errorMessage(httpStatusCode int, responseBody string) error {
	switch {
	case httpStatusCode >= 500:
		message := fmt.Sprintf("Server error occurred; Response Code: %d, Response Body: %s", httpStatusCode, responseBody)
		logging.Logger().Info(message)
		return errors.New(message)
	case httpStatusCode >= 400:
		message := fmt.Sprintf("Client error occurred; Response Code: %d, Response Body: %s", httpStatusCode, responseBody)
		logging.Logger().Warn(message)
		return errors.New(message)
	}
	return nil
}
// init seeds the package-wide User-Agent value once at startup.
// TODO version information must be read from a MANIFEST file
func init() {
	userAgentParam = requestHeaderUserAgent{
		sdkName:   "opsgenie-go-sdk",
		version:   "1.0.0",
		os:        runtime.GOOS,
		goVersion: runtime.Version(),
		timezone:  time.Local.String(),
	}
}

View File

@ -0,0 +1,37 @@
package client
import (
"fmt"
"time"
)
// ProxyConfiguration is the type that contains the proxy configurations of
// the OpsGenieClient.
type ProxyConfiguration struct {
	Host     string
	Port     int
	Username string
	Password string
	ProxyURI string
	Protocol string
}

// HTTPTransportSettings is the type that contains the HTTP transport layer
// configurations of the OpsGenieClient.
type HTTPTransportSettings struct {
	ConnectionTimeout time.Duration
	RequestTimeout    time.Duration
	MaxRetryAttempts  int
}

// toString renders the proxy configuration as a proxy URI. An explicit
// ProxyURI wins outright; otherwise the URI is assembled from the protocol
// (defaulting to "http"), optional user:password credentials, host and port.
func (proxy *ProxyConfiguration) toString() string {
	if proxy.ProxyURI != "" {
		return proxy.ProxyURI
	}
	if proxy.Protocol == "" {
		// The default is persisted on the receiver for subsequent calls.
		proxy.Protocol = "http"
	}
	credentials := ""
	if proxy.Username != "" && proxy.Password != "" {
		credentials = proxy.Username + ":" + proxy.Password + "@"
	}
	return fmt.Sprintf("%s://%s%s:%d", proxy.Protocol, credentials, proxy.Host, proxy.Port)
}

View File

@ -0,0 +1,559 @@
package client
import (
"bytes"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"time"
"github.com/opsgenie/opsgenie-go-sdk/alerts"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
const (
	// Alert API endpoints, relative to the client's base URL. Several
	// share the same path and are distinguished by HTTP method: POST to
	// create, GET to retrieve/list/count, DELETE to delete.
	createAlertURL          = "/v1/json/alert"
	closeAlertURL           = "/v1/json/alert/close"
	deleteAlertURL          = "/v1/json/alert"
	getAlertURL             = "/v1/json/alert"
	listAlertsURL           = "/v1/json/alert"
	listAlertNotesURL       = "/v1/json/alert/note"
	listAlertLogsURL        = "/v1/json/alert/log"
	listAlertRecipientsURL  = "/v1/json/alert/recipient"
	acknowledgeAlertURL     = "/v1/json/alert/acknowledge"
	renotifyAlertURL        = "/v1/json/alert/renotify"
	takeOwnershipAlertURL   = "/v1/json/alert/takeOwnership"
	assignOwnershipAlertURL = "/v1/json/alert/assign"
	addTeamAlertURL         = "/v1/json/alert/team"
	addRecipientAlertURL    = "/v1/json/alert/recipient"
	addNoteAlertURL         = "/v1/json/alert/note"
	addTagsAlertURL         = "/v1/json/alert/tags"
	executeActionAlertURL   = "/v1/json/alert/executeAction"
	attachFileAlertURL      = "/v1/json/alert/attach"
	countAlertURL           = "/v1/json/alert/count"
)
// OpsGenieAlertClient is the data type to make Alert API requests. It
// embeds OpsGenieClient, inheriting the API key, transport settings and
// endpoint configuration.
type OpsGenieAlertClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGenieAlertClient.
func (cli *OpsGenieAlertClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Create submits a new alert to the OpsGenie Alert API.
func (cli *OpsGenieAlertClient) Create(req alerts.CreateAlertRequest) (*alerts.CreateAlertResponse, error) {
	req.APIKey = cli.apiKey
	// A nil response means the request never completed; err carries the cause.
	response, err := cli.sendRequest(cli.buildPostRequest(createAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.CreateAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Count returns the number of alerts matching the query in req.
func (cli *OpsGenieAlertClient) Count(req alerts.CountAlertRequest) (*alerts.CountAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(countAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.CountAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Close marks an alert as closed at OpsGenie.
func (cli *OpsGenieAlertClient) Close(req alerts.CloseAlertRequest) (*alerts.CloseAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(closeAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.CloseAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Delete removes an alert from OpsGenie.
func (cli *OpsGenieAlertClient) Delete(req alerts.DeleteAlertRequest) (*alerts.DeleteAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildDeleteRequest(deleteAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.DeleteAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Get fetches the details of a single alert from OpsGenie.
func (cli *OpsGenieAlertClient) Get(req alerts.GetAlertRequest) (*alerts.GetAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(getAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.GetAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// List retrieves alerts from OpsGenie matching the query in req.
//
// Fix: on a failed request the original returned errors.New(err.Error()),
// which panics if err is nil and needlessly re-wraps the error; the error
// is now returned directly, matching every sibling method on this client.
func (cli *OpsGenieAlertClient) List(req alerts.ListAlertsRequest) (*alerts.ListAlertsResponse, error) {
	req.APIKey = cli.apiKey
	resp, err := cli.sendRequest(cli.buildGetRequest(listAlertsURL, req))
	if resp == nil {
		return nil, err
	}
	defer resp.Body.Close()
	var listAlertsResp alerts.ListAlertsResponse
	if err = resp.Body.FromJsonTo(&listAlertsResp); err != nil {
		message := "Server response can not be parsed, " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &listAlertsResp, nil
}
// ListNotes fetches the notes attached to an alert from OpsGenie.
func (cli *OpsGenieAlertClient) ListNotes(req alerts.ListAlertNotesRequest) (*alerts.ListAlertNotesResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(listAlertNotesURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.ListAlertNotesResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// ListLogs fetches the activity log entries of an alert from OpsGenie.
func (cli *OpsGenieAlertClient) ListLogs(req alerts.ListAlertLogsRequest) (*alerts.ListAlertLogsResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(listAlertLogsURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.ListAlertLogsResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// ListRecipients fetches the recipients of an alert from OpsGenie.
func (cli *OpsGenieAlertClient) ListRecipients(req alerts.ListAlertRecipientsRequest) (*alerts.ListAlertRecipientsResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(listAlertRecipientsURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.ListAlertRecipientsResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Acknowledge marks an alert as acknowledged at OpsGenie.
func (cli *OpsGenieAlertClient) Acknowledge(req alerts.AcknowledgeAlertRequest) (*alerts.AcknowledgeAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(acknowledgeAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.AcknowledgeAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Renotify asks OpsGenie to notify an alert's recipients again.
func (cli *OpsGenieAlertClient) Renotify(req alerts.RenotifyAlertRequest) (*alerts.RenotifyAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(renotifyAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.RenotifyAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// TakeOwnership claims ownership of an alert at OpsGenie.
func (cli *OpsGenieAlertClient) TakeOwnership(req alerts.TakeOwnershipAlertRequest) (*alerts.TakeOwnershipAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(takeOwnershipAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.TakeOwnershipAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// AssignOwner assigns the user named in req as the owner of an alert at OpsGenie.
func (cli *OpsGenieAlertClient) AssignOwner(req alerts.AssignOwnerAlertRequest) (*alerts.AssignOwnerAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(assignOwnershipAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.AssignOwnerAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// AddTeam attaches a team to an alert at OpsGenie.
func (cli *OpsGenieAlertClient) AddTeam(req alerts.AddTeamAlertRequest) (*alerts.AddTeamAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(addTeamAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.AddTeamAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// AddRecipient attaches a recipient to an alert at OpsGenie.
func (cli *OpsGenieAlertClient) AddRecipient(req alerts.AddRecipientAlertRequest) (*alerts.AddRecipientAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(addRecipientAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.AddRecipientAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// AddNote appends a note to an alert at OpsGenie.
func (cli *OpsGenieAlertClient) AddNote(req alerts.AddNoteAlertRequest) (*alerts.AddNoteAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(addNoteAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.AddNoteAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// AddTags appends tags to an alert at OpsGenie.
func (cli *OpsGenieAlertClient) AddTags(req alerts.AddTagsAlertRequest) (*alerts.AddTagsAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(addTagsAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.AddTagsAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// ExecuteAction runs a custom action against an alert at OpsGenie.
func (cli *OpsGenieAlertClient) ExecuteAction(req alerts.ExecuteActionAlertRequest) (*alerts.ExecuteActionAlertResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(executeActionAlertURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result alerts.ExecuteActionAlertResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// AttachFile uploads req.Attachment to an alert via a multipart/form-data
// POST to the OpsGenie Alert API. Unlike the other methods on this client
// it builds its own HTTP client, because the shared request path does not
// support multipart bodies.
//
// Fixes over the original:
//   - the os.Open error is now checked before file.Close is deferred;
//   - the unreadable-response message uses fmt.Sprintf (the original passed
//     a %s format string to fmt.Sprint, which never interpolates).
func (cli *OpsGenieAlertClient) AttachFile(req alerts.AttachFileAlertRequest) (*alerts.AttachFileAlertResponse, error) {
	req.APIKey = cli.apiKey
	var b bytes.Buffer
	w := multipart.NewWriter(&b)
	path := req.Attachment.Name()
	file, err := os.Open(path)
	if err != nil {
		message := "Attachment can not be opened for reading. " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	defer file.Close()
	// Add the attachment itself as the first form part.
	fw, err := w.CreateFormFile("attachment", filepath.Base(path))
	if err != nil {
		message := "Can not build the request with the field attachment. " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	if _, err := io.Copy(fw, file); err != nil {
		message := "Can not copy the attachment into the request. " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	// Add the remaining fields. Empty fields must not be placed into the
	// request, otherwise the API yields an incomplete boundary exception.
	if req.APIKey != "" {
		if err = writeField(*w, "apiKey", req.APIKey); err != nil {
			return nil, err
		}
	}
	if req.ID != "" {
		if err = writeField(*w, "id", req.ID); err != nil {
			return nil, err
		}
	}
	if req.Alias != "" {
		if err = writeField(*w, "alias", req.Alias); err != nil {
			return nil, err
		}
	}
	if req.User != "" {
		if err = writeField(*w, "user", req.User); err != nil {
			return nil, err
		}
	}
	if req.Source != "" {
		if err = writeField(*w, "source", req.Source); err != nil {
			return nil, err
		}
	}
	if req.IndexFile != "" {
		if err = writeField(*w, "indexFile", req.IndexFile); err != nil {
			return nil, err
		}
	}
	if req.Note != "" {
		if err = writeField(*w, "note", req.Note); err != nil {
			return nil, err
		}
	}
	w.Close()
	httpReq, err := http.NewRequest("POST", cli.opsGenieAPIURL+attachFileAlertURL, &b)
	if err != nil {
		message := "Can not create the multipart/form-data request. " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	httpReq.Header.Set("Content-Type", w.FormDataContentType())
	transport := &http.Transport{
		// NOTE(review): certificate verification is disabled here; confirm this
		// is intentional before using the client against untrusted networks.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		Proxy:           http.ProxyFromEnvironment,
		Dial: func(netw, addr string) (net.Conn, error) {
			conn, err := net.DialTimeout(netw, addr, cli.httpTransportSettings.ConnectionTimeout)
			if err != nil {
				message := "Error occurred while connecting: " + err.Error()
				logging.Logger().Warn(message)
				return nil, errors.New(message)
			}
			conn.SetDeadline(time.Now().Add(cli.httpTransportSettings.RequestTimeout))
			return conn, nil
		},
	}
	client := &http.Client{Transport: transport}
	// Apply proxy settings, if any were configured.
	if cli.proxy != nil {
		proxyURL, proxyErr := url.Parse(cli.proxy.toString())
		if proxyErr != nil {
			message := "Can not set the proxy configuration " + proxyErr.Error()
			logging.Logger().Warn(message)
			return nil, errors.New(message)
		}
		transport.Proxy = http.ProxyURL(proxyURL)
	}
	url := httpReq.URL.String()
	logging.Logger().Info("Executing OpsGenie request to [" + url + "] with multipart data.")
	// Retry the upload up to MaxRetryAttempts times before giving up.
	var res *http.Response
	for i := 0; i < cli.httpTransportSettings.MaxRetryAttempts; i++ {
		res, err = client.Do(httpReq)
		if err == nil {
			defer res.Body.Close()
			break
		}
		if res != nil {
			logging.Logger().Info(fmt.Sprintf("Retrying request [%s] ResponseCode:[%d]. RetryCount: %d", url, res.StatusCode, (i + 1)))
		} else {
			logging.Logger().Info(fmt.Sprintf("Retrying request [%s] Reason:[%s]. RetryCount: %d", url, err.Error(), (i + 1)))
		}
		time.Sleep(timeSleepBetweenRequests)
	}
	if err != nil {
		message := "Can not attach the file, unable to send the request. " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	httpStatusCode := res.StatusCode
	if httpStatusCode >= 400 {
		body, err := ioutil.ReadAll(res.Body)
		if err == nil {
			return nil, errorMessage(httpStatusCode, string(body))
		}
		message := fmt.Sprintf("Couldn't read the response, %s", err.Error())
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	attachFileAlertResp := alerts.AttachFileAlertResponse{Status: res.Status, Code: res.StatusCode}
	return &attachFileAlertResp, nil
}
// writeField writes one non-empty form field into the multipart request,
// logging and wrapping any failure.
// NOTE(review): the writer is taken by value and dereferenced at each call
// site; a *multipart.Writer parameter would be the conventional form, but
// changing the signature would require touching every caller.
func writeField(w multipart.Writer, fieldName string, fieldVal string) error {
	err := w.WriteField(fieldName, fieldVal)
	if err == nil {
		return nil
	}
	message := "Can not write field " + fieldName + " into the request. Reason: " + err.Error()
	logging.Logger().Warn(message)
	return errors.New(message)
}

View File

@ -0,0 +1,121 @@
package client
import (
"errors"
"github.com/opsgenie/opsgenie-go-sdk/escalation"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
const (
	// escalationURL is the Escalation API endpoint; all verbs (POST/GET/DELETE)
	// share this path.
	escalationURL = "/v1/json/escalation"
)

// OpsGenieEscalationClient is the data type to make Escalation API requests.
// It embeds OpsGenieClient for transport, authentication and retry behavior.
type OpsGenieEscalationClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGenieEscalationClient.
func (cli *OpsGenieEscalationClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Create registers a new escalation at OpsGenie.
func (cli *OpsGenieEscalationClient) Create(req escalation.CreateEscalationRequest) (*escalation.CreateEscalationResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(escalationURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result escalation.CreateEscalationResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Update modifies an existing escalation at OpsGenie.
func (cli *OpsGenieEscalationClient) Update(req escalation.UpdateEscalationRequest) (*escalation.UpdateEscalationResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(escalationURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result escalation.UpdateEscalationResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Delete removes an escalation from OpsGenie.
func (cli *OpsGenieEscalationClient) Delete(req escalation.DeleteEscalationRequest) (*escalation.DeleteEscalationResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildDeleteRequest(escalationURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result escalation.DeleteEscalationResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Get fetches the details of a single escalation from OpsGenie.
func (cli *OpsGenieEscalationClient) Get(req escalation.GetEscalationRequest) (*escalation.GetEscalationResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(escalationURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result escalation.GetEscalationResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// List retrieves escalations from OpsGenie.
//
// Fix: on a failed request the original returned errors.New(err.Error()),
// which panics if err is nil and needlessly re-wraps the error; the error
// is now returned directly, matching every sibling method on this client.
func (cli *OpsGenieEscalationClient) List(req escalation.ListEscalationsRequest) (*escalation.ListEscalationsResponse, error) {
	req.APIKey = cli.apiKey
	resp, err := cli.sendRequest(cli.buildGetRequest(escalationURL, req))
	if resp == nil {
		return nil, err
	}
	defer resp.Body.Close()
	var listEscalationsResp escalation.ListEscalationsResponse
	if err = resp.Body.FromJsonTo(&listEscalationsResp); err != nil {
		message := "Server response can not be parsed, " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &listEscalationsResp, nil
}

View File

@ -0,0 +1,191 @@
package client
import (
"errors"
"github.com/opsgenie/opsgenie-go-sdk/heartbeat"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
// Heartbeat API endpoint paths; several operations share a path and differ
// only in the HTTP verb used.
const (
	addHeartbeatURL     = "/v1/json/heartbeat"
	updateHeartbeatURL  = "/v1/json/heartbeat"
	enableHeartbeatURL  = "/v1/json/heartbeat/enable"
	disableHeartbeatURL = "/v1/json/heartbeat/disable"
	deleteHeartbeatURL  = "/v1/json/heartbeat"
	getHeartbeatURL     = "/v1/json/heartbeat"
	listHeartbeatURL    = "/v1/json/heartbeat"
	sendHeartbeatURL    = "/v1/json/heartbeat/send"
)

// OpsGenieHeartbeatClient is the data type to make Heartbeat API requests.
// It embeds OpsGenieClient for transport, authentication and retry behavior.
type OpsGenieHeartbeatClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGenieHeartbeatClient.
func (cli *OpsGenieHeartbeatClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Add registers a new heartbeat at OpsGenie.
func (cli *OpsGenieHeartbeatClient) Add(req heartbeat.AddHeartbeatRequest) (*heartbeat.AddHeartbeatResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(addHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.AddHeartbeatResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Update changes the configuration of an existing heartbeat at OpsGenie.
func (cli *OpsGenieHeartbeatClient) Update(req heartbeat.UpdateHeartbeatRequest) (*heartbeat.UpdateHeartbeatResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(updateHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.UpdateHeartbeatResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Enable turns a heartbeat on at OpsGenie.
func (cli *OpsGenieHeartbeatClient) Enable(req heartbeat.EnableHeartbeatRequest) (*heartbeat.EnableHeartbeatResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(enableHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.EnableHeartbeatResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Disable turns a heartbeat off at OpsGenie.
func (cli *OpsGenieHeartbeatClient) Disable(req heartbeat.DisableHeartbeatRequest) (*heartbeat.DisableHeartbeatResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(disableHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.DisableHeartbeatResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Delete removes a heartbeat from OpsGenie.
func (cli *OpsGenieHeartbeatClient) Delete(req heartbeat.DeleteHeartbeatRequest) (*heartbeat.DeleteHeartbeatResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildDeleteRequest(deleteHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.DeleteHeartbeatResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Get fetches the details of a single heartbeat from OpsGenie.
func (cli *OpsGenieHeartbeatClient) Get(req heartbeat.GetHeartbeatRequest) (*heartbeat.GetHeartbeatResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(getHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.GetHeartbeatResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// List retrieves heartbeats from OpsGenie.
func (cli *OpsGenieHeartbeatClient) List(req heartbeat.ListHeartbeatsRequest) (*heartbeat.ListHeartbeatsResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildGetRequest(listHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.ListHeartbeatsResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Send transmits a heartbeat signal to OpsGenie.
func (cli *OpsGenieHeartbeatClient) Send(req heartbeat.SendHeartbeatRequest) (*heartbeat.SendHeartbeatResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(sendHeartbeatURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result heartbeat.SendHeartbeatResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}

View File

@ -0,0 +1,63 @@
package client
import (
"errors"
integration "github.com/opsgenie/opsgenie-go-sdk/integration"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
// Integration API endpoint paths.
const (
	enableIntegrationURL  = "/v1/json/integration/enable"
	disableIntegrationURL = "/v1/json/integration/disable"
)

// OpsGenieIntegrationClient is the data type to make Integration API requests.
// It embeds OpsGenieClient for transport, authentication and retry behavior.
type OpsGenieIntegrationClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGenieIntegrationClient.
func (cli *OpsGenieIntegrationClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Enable turns an integration on at OpsGenie.
func (cli *OpsGenieIntegrationClient) Enable(req integration.EnableIntegrationRequest) (*integration.EnableIntegrationResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(enableIntegrationURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result integration.EnableIntegrationResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Disable turns an integration off at OpsGenie.
func (cli *OpsGenieIntegrationClient) Disable(req integration.DisableIntegrationRequest) (*integration.DisableIntegrationResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(disableIntegrationURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result integration.DisableIntegrationResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}

View File

@ -0,0 +1,61 @@
package client
import (
"errors"
"github.com/opsgenie/opsgenie-go-sdk/logging"
policy "github.com/opsgenie/opsgenie-go-sdk/policy"
)
// Alert policy API endpoint paths.
const (
	enablePolicyURL  = "/v1/json/alert/policy/enable"
	disablePolicyURL = "/v1/json/alert/policy/disable"
)

// OpsGeniePolicyClient is the data type to make Policy API requests.
// It embeds OpsGenieClient for transport, authentication and retry behavior.
type OpsGeniePolicyClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGeniePolicyClient.
func (cli *OpsGeniePolicyClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Enable turns an alert policy on at OpsGenie.
func (cli *OpsGeniePolicyClient) Enable(req policy.EnablePolicyRequest) (*policy.EnablePolicyResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(enablePolicyURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result policy.EnablePolicyResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Disable turns an alert policy off at OpsGenie.
func (cli *OpsGeniePolicyClient) Disable(req policy.DisablePolicyRequest) (*policy.DisablePolicyResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(disablePolicyURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result policy.DisablePolicyResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}

View File

@ -0,0 +1,124 @@
package client
import (
"errors"
"fmt"
"github.com/opsgenie/opsgenie-go-sdk/schedule"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
const (
	// scheduleURL is the Schedule API endpoint; all verbs (POST/GET/DELETE)
	// share this path.
	scheduleURL = "/v1/json/schedule"
)

// OpsGenieScheduleClient is the data type to make Schedule API requests.
// It embeds OpsGenieClient for transport, authentication and retry behavior.
type OpsGenieScheduleClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGenieScheduleClient.
func (cli *OpsGenieScheduleClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Create registers a new schedule at OpsGenie.
func (cli *OpsGenieScheduleClient) Create(req schedule.CreateScheduleRequest) (*schedule.CreateScheduleResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(scheduleURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result schedule.CreateScheduleResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Update modifies an existing schedule at OpsGenie.
func (cli *OpsGenieScheduleClient) Update(req schedule.UpdateScheduleRequest) (*schedule.UpdateScheduleResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildPostRequest(scheduleURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result schedule.UpdateScheduleResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Delete removes a schedule from OpsGenie.
func (cli *OpsGenieScheduleClient) Delete(req schedule.DeleteScheduleRequest) (*schedule.DeleteScheduleResponse, error) {
	req.APIKey = cli.apiKey
	response, err := cli.sendRequest(cli.buildDeleteRequest(scheduleURL, req))
	if response == nil {
		return nil, err
	}
	defer response.Body.Close()
	var result schedule.DeleteScheduleResponse
	if parseErr := response.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Get fetches the details of a single schedule from OpsGenie.
//
// Fix: the original left two debugging statements in place — a
// fmt.Println on parse failure and a fmt.Printf that dumped the whole
// response to stdout on every successful call. Both are removed; the
// parse failure is still reported through the logger and returned error.
func (cli *OpsGenieScheduleClient) Get(req schedule.GetScheduleRequest) (*schedule.GetScheduleResponse, error) {
	req.APIKey = cli.apiKey
	resp, err := cli.sendRequest(cli.buildGetRequest(scheduleURL, req))
	if resp == nil {
		return nil, err
	}
	defer resp.Body.Close()
	var getScheduleResp schedule.GetScheduleResponse
	if err = resp.Body.FromJsonTo(&getScheduleResp); err != nil {
		// fmt.Sprintf keeps the file's fmt import in use now that the
		// stray debug prints are gone.
		message := fmt.Sprintf("Server response can not be parsed, %s", err.Error())
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &getScheduleResp, nil
}
// List retrieves schedules from OpsGenie.
//
// Fix: on a failed request the original returned errors.New(err.Error()),
// which panics if err is nil and needlessly re-wraps the error; the error
// is now returned directly, matching every sibling method on this client.
func (cli *OpsGenieScheduleClient) List(req schedule.ListSchedulesRequest) (*schedule.ListSchedulesResponse, error) {
	req.APIKey = cli.apiKey
	resp, err := cli.sendRequest(cli.buildGetRequest(scheduleURL, req))
	if resp == nil {
		return nil, err
	}
	defer resp.Body.Close()
	var listSchedulesResp schedule.ListSchedulesResponse
	if err = resp.Body.FromJsonTo(&listSchedulesResp); err != nil {
		message := "Server response can not be parsed, " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &listSchedulesResp, nil
}

View File

@ -0,0 +1,142 @@
package client
import (
"errors"
"github.com/opsgenie/opsgenie-go-sdk/team"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
const (
	// teamURL is the REST endpoint for team CRUD operations.
	teamURL = "/v1/json/team"
	// teamLogsURL is the REST endpoint for retrieving team audit logs.
	teamLogsURL = "/v1/json/team/log"
)

// OpsGenieTeamClient is the data type to make Team API requests.
// It embeds OpsGenieClient, which supplies the API key and the
// sendRequest/build*Request helpers used by the methods below.
type OpsGenieTeamClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGenieTeamClient.
func (cli *OpsGenieTeamClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Create method creates a team at OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieTeamClient) Create(req team.CreateTeamRequest) (*team.CreateTeamResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildPostRequest(teamURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result team.CreateTeamResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Update method updates a team at OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieTeamClient) Update(req team.UpdateTeamRequest) (*team.UpdateTeamResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildPostRequest(teamURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result team.UpdateTeamResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Delete method deletes a team at OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieTeamClient) Delete(req team.DeleteTeamRequest) (*team.DeleteTeamResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildDeleteRequest(teamURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result team.DeleteTeamResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Get method retrieves specified team details from OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieTeamClient) Get(req team.GetTeamRequest) (*team.GetTeamResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildGetRequest(teamURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result team.GetTeamResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// List method retrieves teams from OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieTeamClient) List(req team.ListTeamsRequest) (*team.ListTeamsResponse, error) {
	req.APIKey = cli.apiKey
	resp, err := cli.sendRequest(cli.buildGetRequest(teamURL, req))
	if resp == nil {
		// Return the transport error as-is. The previous
		// errors.New(err.Error()) discarded the original error value and
		// would panic if err were nil; the sibling CRUD methods return err
		// directly, so do the same here for consistency.
		return nil, err
	}
	defer resp.Body.Close()
	var listTeamsResp team.ListTeamsResponse
	if err = resp.Body.FromJsonTo(&listTeamsResp); err != nil {
		message := "Server response can not be parsed, " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &listTeamsResp, nil
}
// ListLogs method retrieves team logs from OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieTeamClient) ListLogs(req team.ListTeamLogsRequest) (*team.ListTeamLogsResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildGetRequest(teamLogsURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result team.ListTeamLogsResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}

View File

@ -0,0 +1,122 @@
package client
import (
"errors"
"github.com/opsgenie/opsgenie-go-sdk/user"
"github.com/opsgenie/opsgenie-go-sdk/logging"
)
const (
	// userURL is the REST endpoint for user CRUD operations.
	userURL = "/v1/json/user"
)

// OpsGenieUserClient is the data type to make User API requests.
// It embeds OpsGenieClient, which supplies the API key and the
// sendRequest/build*Request helpers used by the methods below.
type OpsGenieUserClient struct {
	OpsGenieClient
}

// SetOpsGenieClient sets the embedded OpsGenieClient type of the OpsGenieUserClient.
func (cli *OpsGenieUserClient) SetOpsGenieClient(ogCli OpsGenieClient) {
	cli.OpsGenieClient = ogCli
}
// Create method creates a user at OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieUserClient) Create(req user.CreateUserRequest) (*user.CreateUserResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildPostRequest(userURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result user.CreateUserResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Update method updates a user at OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieUserClient) Update(req user.UpdateUserRequest) (*user.UpdateUserResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildPostRequest(userURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result user.UpdateUserResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Delete method deletes a user at OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieUserClient) Delete(req user.DeleteUserRequest) (*user.DeleteUserResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildDeleteRequest(userURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result user.DeleteUserResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// Get method retrieves specified user details from OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieUserClient) Get(req user.GetUserRequest) (*user.GetUserResponse, error) {
	req.APIKey = cli.apiKey

	httpResp, sendErr := cli.sendRequest(cli.buildGetRequest(userURL, req))
	if httpResp == nil {
		return nil, sendErr
	}
	defer httpResp.Body.Close()

	var result user.GetUserResponse
	if parseErr := httpResp.Body.FromJsonTo(&result); parseErr != nil {
		message := "Server response can not be parsed, " + parseErr.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &result, nil
}
// List method retrieves users from OpsGenie.
// The client's API key is injected into the request before it is sent.
func (cli *OpsGenieUserClient) List(req user.ListUsersRequest) (*user.ListUsersResponse, error) {
	req.APIKey = cli.apiKey
	resp, err := cli.sendRequest(cli.buildGetRequest(userURL, req))
	if resp == nil {
		// Return the transport error as-is. The previous
		// errors.New(err.Error()) discarded the original error value and
		// would panic if err were nil; the sibling CRUD methods return err
		// directly, so do the same here for consistency.
		return nil, err
	}
	defer resp.Body.Close()
	var listUsersResp user.ListUsersResponse
	if err = resp.Body.FromJsonTo(&listUsersResp); err != nil {
		message := "Server response can not be parsed, " + err.Error()
		logging.Logger().Warn(message)
		return nil, errors.New(message)
	}
	return &listUsersResp, nil
}

View File

@ -0,0 +1,50 @@
/*
Copyright 2016. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package escalation provides requests and response structures to achieve Escalation API actions.
package escalation
// Rule defines the structure for each escalation rule definition.
type Rule struct {
	Delay           int    `json:"delay"`
	Notify          string `json:"notify,omitempty"`
	NotifyType      string `json:"notifyType,omitempty"`
	NotifyCondition string `json:"notifyCondition,omitempty"`
}

// CreateEscalationRequest provides necessary parameter structure for creating escalation.
// Sent as a JSON body (POST), hence the `json` tags.
type CreateEscalationRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	Name   string `json:"name,omitempty"`
	Rules  []Rule `json:"rules,omitempty"`
}

// UpdateEscalationRequest provides necessary parameter structure for updating an escalation.
type UpdateEscalationRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	// NOTE(review): casing is inconsistent with DeleteEscalationRequest.ID;
	// kept as-is because renaming an exported field breaks callers.
	Id    string `json:"id,omitempty"`
	Name  string `json:"name,omitempty"`
	Rules []Rule `json:"rules,omitempty"`
}

// DeleteEscalationRequest provides necessary parameter structure for deleting an escalation.
// Uses `url` tags — presumably encoded as query-string parameters by the
// request builder (DELETE has no JSON body); confirm against the client code.
type DeleteEscalationRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	ID     string `url:"id,omitempty"`
	Name   string `url:"name,omitempty"`
}

// GetEscalationRequest provides necessary parameter structure for requesting escalation information.
type GetEscalationRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Id     string `url:"id,omitempty"`
	Name   string `url:"name,omitempty"`
}

// ListEscalationsRequest provides necessary parameter structure for listing escalations.
type ListEscalationsRequest struct {
	APIKey string `url:"apiKey,omitempty"`
}

View File

@ -0,0 +1,34 @@
package escalation
// CreateEscalationResponse holds the result data of a CreateEscalationRequest.
type CreateEscalationResponse struct {
	Id     string `json:"id"`
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// UpdateEscalationResponse holds the result data of an UpdateEscalationRequest.
type UpdateEscalationResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// DeleteEscalationResponse holds the result data of a DeleteEscalationRequest.
type DeleteEscalationResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// GetEscalationResponse holds a single escalation's details.
type GetEscalationResponse struct {
	Id    string `json:"id,omitempty"`
	Name  string `json:"name,omitempty"`
	Team  string `json:"team,omitempty"`
	Rules []Rule `json:"rules,omitempty"`
}

// ListEscalationsResponse holds the result data of a ListEscalationsRequest.
type ListEscalationsResponse struct {
	Escalations []GetEscalationResponse `json:"escalations,omitempty"`
}

View File

@ -0,0 +1,63 @@
/*
Copyright 2015 OpsGenie. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package heartbeat provides requests and response structures to achieve Heartbeat API actions.
package heartbeat
// AddHeartbeatRequest provides necessary parameter structure to Create an Heartbeat at OpsGenie.
type AddHeartbeatRequest struct {
	APIKey       string `json:"apiKey,omitempty"`
	Name         string `json:"name,omitempty"`
	Interval     int    `json:"interval,omitempty"`
	IntervalUnit string `json:"intervalUnit,omitempty"`
	Description  string `json:"description,omitempty"`
	// Pointer so that an explicit false is distinguishable from "unset"
	// (omitempty would drop a plain false).
	Enabled *bool `json:"enabled,omitempty"`
}

// UpdateHeartbeatRequest provides necessary parameter structure to Update an existing Heartbeat at OpsGenie.
type UpdateHeartbeatRequest struct {
	APIKey       string `json:"apiKey,omitempty"`
	Name         string `json:"name,omitempty"`
	Interval     int    `json:"interval,omitempty"`
	IntervalUnit string `json:"intervalUnit,omitempty"`
	Description  string `json:"description,omitempty"`
	Enabled      *bool  `json:"enabled,omitempty"`
}

// EnableHeartbeatRequest provides necessary parameter structure to Enable an Heartbeat at OpsGenie.
type EnableHeartbeatRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	Name   string `json:"name,omitempty"`
}

// DisableHeartbeatRequest provides necessary parameter structure to Disable an Heartbeat at OpsGenie.
type DisableHeartbeatRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	Name   string `json:"name,omitempty"`
}

// DeleteHeartbeatRequest provides necessary parameter structure to Delete an Heartbeat from OpsGenie.
// Uses `url` tags — presumably encoded as query-string parameters.
type DeleteHeartbeatRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Name   string `url:"name,omitempty"`
}

// GetHeartbeatRequest provides necessary parameter structure to Retrieve an Heartbeat with details from OpsGenie.
type GetHeartbeatRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Name   string `url:"name,omitempty"`
}

// ListHeartbeatsRequest provides necessary parameter structure to Retrieve Heartbeats from OpsGenie.
type ListHeartbeatsRequest struct {
	APIKey string `url:"apiKey,omitempty"`
}

// SendHeartbeatRequest provides necessary parameter structure to Send an Heartbeat Signal to OpsGenie.
type SendHeartbeatRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	Name   string `json:"name,omitempty"`
}

View File

@ -0,0 +1,63 @@
package heartbeat
// AddHeartbeatResponse holds the result data of the AddHeartbeatRequest.
type AddHeartbeatResponse struct {
	Name   string `json:"name"`
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// UpdateHeartbeatResponse holds the result data of the UpdateHeartbeatRequest.
type UpdateHeartbeatResponse struct {
	Name   string `json:"name"`
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// EnableHeartbeatResponse holds the result data of the EnableHeartbeatRequest.
type EnableHeartbeatResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// DisableHeartbeatResponse holds the result data of the DisableHeartbeatRequest.
type DisableHeartbeatResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// DeleteHeartbeatResponse holds the result data of the DeleteHeartbeatRequest.
type DeleteHeartbeatResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// GetHeartbeatResponse holds the result data of the GetHeartbeatRequest.
// It embeds Heartbeat, so the heartbeat fields are decoded directly from
// the top level of the response JSON.
type GetHeartbeatResponse struct {
	Heartbeat
}

// ListHeartbeatsResponse holds the result data of the ListHeartbeatsRequest.
type ListHeartbeatsResponse struct {
	Heartbeats []Heartbeat `json:"heartbeats"`
}

// Heartbeat describes a single heartbeat monitor as returned by the API.
type Heartbeat struct {
	Name        string `json:"name"`
	Status      string `json:"status"`
	Description string `json:"description"`
	Enabled     bool   `json:"enabled"`
	// NOTE(review): the wire key uses the unusual casing "lastHeartBeat";
	// keep the tag in sync with the API.
	LastHeartbeat uint64 `json:"lastHeartBeat"`
	Interval      int    `json:"interval"`
	IntervalUnit  string `json:"intervalUnit"`
	Expired       bool   `json:"expired"`
}

// SendHeartbeatResponse holds the result data of the SendHeartbeatRequest.
type SendHeartbeatResponse struct {
	WillExpireAt uint64 `json:"willExpireAt"`
	Status       string `json:"status"`
	Heartbeat    uint64 `json:"heartbeat"`
	Took         int    `json:"took"`
	Code         int    `json:"code"`
}

View File

@ -0,0 +1,22 @@
/*
Copyright 2015 OpsGenie. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package integration provides requests and response structures to achieve Integration API actions.
package integration
// EnableIntegrationRequest provides necessary parameter structure to Enable an integration at OpsGenie.
// The integration is identified by ID or Name — presumably one of the two
// is sufficient; confirm against the Integration API documentation.
type EnableIntegrationRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Name   string `json:"name,omitempty"`
}

// DisableIntegrationRequest provides necessary parameter structure to Disable an integration at OpsGenie.
type DisableIntegrationRequest struct {
	APIKey string `json:"apiKey,omitempty"`
	ID     string `json:"id,omitempty"`
	Name   string `json:"name,omitempty"`
}

View File

@ -0,0 +1,13 @@
package integration
// EnableIntegrationResponse holds the result data of the EnableIntegrationRequest.
type EnableIntegrationResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// DisableIntegrationResponse holds the result data of the DisableIntegrationRequest.
type DisableIntegrationResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

View File

@ -0,0 +1,51 @@
/*
Copyright 2015 OpsGenie. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package logging provides log interface.
package logging
import (
"fmt"
"github.com/cihub/seelog"
)
// logger is the internal logger object.
// init() sets it to seelog.Disabled, so library logging is off by default
// until a caller opts in via UseLogger or ConfigureLogger.
var logger seelog.LoggerInterface

func init() {
	DisableLog()
}

// DisableLog disables all library log output.
func DisableLog() {
	logger = seelog.Disabled
}

// UseLogger is a wrapper for Seelog's UseLogger function. It sets the newLogger as the current logger.
func UseLogger(newLogger seelog.LoggerInterface) {
	logger = newLogger
	seelog.UseLogger(logger)
}

// Logger returns internal logger object to achieve logging.
func Logger() seelog.LoggerInterface {
	return logger
}
// ConfigureLogger configures the new logger according to the configuration
// and sets it as the current logger. On a configuration parse error the
// current logger is left untouched (previously a nil logger was installed,
// which would make later Logger()/FlushLog() calls panic).
func ConfigureLogger(testConfig []byte) {
	// testConfig is already []byte; no conversion needed.
	loggr, err := seelog.LoggerFromConfigAsBytes(testConfig)
	if err != nil {
		fmt.Printf("error occurred: %s\n", err.Error())
		return
	}
	UseLogger(loggr)
}
// FlushLog is a wrapper for seelog's Flush function. It flushes all the
// messages currently buffered in the active logger.
func FlushLog() {
	logger.Flush()
}

View File

@ -0,0 +1,22 @@
/*
Copyright 2015 OpsGenie. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package policy provides requests and response structures to achieve Policy API actions.
package policy
// EnablePolicyRequest provides necessary parameter structure to Enable a policy at OpsGenie.
type EnablePolicyRequest struct {
APIKey string `json:"apiKey,omitempty"`
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
}
// DisablePolicyRequest provides necessary parameter structure to Disable a policy at OpsGenie.
type DisablePolicyRequest struct {
APIKey string `json:"apiKey,omitempty"`
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
}

View File

@ -0,0 +1,13 @@
package policy
// EnablePolicyResponse holds the result data of the EnablePolicyRequest.
type EnablePolicyResponse struct {
Status string `json:"status"`
Code int `json:"code"`
}
// DisablePolicyResponse holds the result data of the DisablePolicyRequest.
type DisablePolicyResponse struct {
Status string `json:"status"`
Code int `json:"code"`
}

View File

@ -0,0 +1,65 @@
/*
Copyright 2016. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package schedule provides requests and response structures to achieve Schedule API actions.
package schedule
// Restriction defines the structure for a single rotation restriction.
// (The original comment said "Restrictions"; the type is Restriction.)
type Restriction struct {
	StartDay  string `json:"startDay,omitempty"`
	StartTime string `json:"startTime,omitempty"`
	EndDay    string `json:"endDay,omitempty"`
	EndTime   string `json:"endTime,omitempty"`
}

// Rotation defines the structure for each rotation definition.
type Rotation struct {
	StartDate      string        `json:"startDate,omitempty"`
	EndDate        string        `json:"endDate,omitempty"`
	RotationType   string        `json:"rotationType,omitempty"`
	Participants   []string      `json:"participants,omitempty"`
	Name           string        `json:"name,omitempty"`
	RotationLength int           `json:"rotationLength,omitempty"`
	Restrictions   []Restriction `json:"restrictions,omitempty"`
}

// CreateScheduleRequest provides necessary parameter structure for creating Schedule.
type CreateScheduleRequest struct {
	APIKey   string `json:"apiKey,omitempty"`
	Name     string `json:"name,omitempty"`
	Timezone string `json:"timezone,omitempty"`
	// Pointer so that an explicit false survives omitempty.
	Enabled   *bool      `json:"enabled,omitempty"`
	Rotations []Rotation `json:"rotations,omitempty"`
}

// UpdateScheduleRequest provides necessary parameter structure for updating a Schedule.
type UpdateScheduleRequest struct {
	Id        string     `json:"id,omitempty"`
	APIKey    string     `json:"apiKey,omitempty"`
	Name      string     `json:"name,omitempty"`
	Timezone  string     `json:"timezone,omitempty"`
	Enabled   *bool      `json:"enabled,omitempty"`
	Rotations []Rotation `json:"rotations,omitempty"`
}

// DeleteScheduleRequest provides necessary parameter structure for deleting a Schedule.
// Uses `url` tags — presumably encoded as query-string parameters.
type DeleteScheduleRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Id     string `url:"id,omitempty"`
	Name   string `url:"name,omitempty"`
}

// GetScheduleRequest provides necessary parameter structure for requesting Schedule information.
type GetScheduleRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Id     string `url:"id,omitempty"`
	Name   string `url:"name,omitempty"`
}

// ListSchedulesRequest provides necessary parameter structure for listing Schedules.
type ListSchedulesRequest struct {
	APIKey string `url:"apiKey,omitempty"`
}

View File

@ -0,0 +1,51 @@
package schedule
// CreateScheduleResponse holds the result data of a CreateScheduleRequest.
type CreateScheduleResponse struct {
	Id     string `json:"id"`
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// UpdateScheduleResponse holds the result data of an UpdateScheduleRequest.
type UpdateScheduleResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// DeleteScheduleResponse holds the result data of a DeleteScheduleRequest.
type DeleteScheduleResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// Participant is a single rotation participant as returned by the API.
type Participant struct {
	Participant string `json:"participant,omitempty"`
	Type        string `json:"type,omitempty"`
}

// RotationInfo defines the structure for each rotation definition.
type RotationInfo struct {
	Id             string        `json:"id,omitempty"`
	StartDate      string        `json:"startDate,omitempty"`
	EndDate        string        `json:"endDate,omitempty"`
	RotationType   string        `json:"rotationType,omitempty"`
	Participants   []Participant `json:"participants,omitempty"`
	Name           string        `json:"name,omitempty"`
	RotationLength int           `json:"rotationLength,omitempty"`
	Restrictions   []Restriction `json:"restrictions,omitempty"`
}

// GetScheduleResponse holds a single schedule's details.
type GetScheduleResponse struct {
	Id   string `json:"id,omitempty"`
	Name string `json:"name,omitempty"`
	Team string `json:"team,omitempty"`
	// NOTE(review): the wire key is "rules" although the values are rotations.
	Rules []RotationInfo `json:"rules,omitempty"`
}

// ListSchedulesResponse holds the result data of a ListSchedulesRequest.
type ListSchedulesResponse struct {
	Schedules []GetScheduleResponse `json:"schedules,omitempty"`
}

View File

@ -0,0 +1,58 @@
/*
Copyright 2016. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package team provides requests and response structures to achieve Team API actions.
package team
// Member defines the structure for each team member definition.
type Member struct {
	User string `json:"user,omitempty"`
	Role string `json:"role,omitempty"`
}

// CreateTeamRequest provides necessary parameter structure for creating a team.
type CreateTeamRequest struct {
	APIKey  string   `json:"apiKey,omitempty"`
	Name    string   `json:"name,omitempty"`
	Members []Member `json:"members,omitempty"`
}

// UpdateTeamRequest provides necessary parameter structure for updating a team.
type UpdateTeamRequest struct {
	APIKey  string   `json:"apiKey,omitempty"`
	Id      string   `json:"id,omitempty"`
	Name    string   `json:"name,omitempty"`
	Members []Member `json:"members,omitempty"`
}

// DeleteTeamRequest provides necessary parameter structure for deleting a team.
// Uses `url` tags — presumably encoded as query-string parameters.
type DeleteTeamRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Id     string `url:"id,omitempty"`
	Name   string `url:"name,omitempty"`
}

// GetTeamRequest provides necessary parameter structure for requesting team information.
type GetTeamRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Id     string `url:"id,omitempty"`
	Name   string `url:"name,omitempty"`
}

// ListTeamsRequest provides necessary parameter structure for listing teams.
type ListTeamsRequest struct {
	APIKey string `url:"apiKey,omitempty"`
}

// ListTeamLogsRequest provides necessary parameter structure for listing team logs.
type ListTeamLogsRequest struct {
	APIKey string `url:"apiKey,omitempty"`
	Id     string `url:"id,omitempty"`
	Name   string `url:"name,omitempty"`
	// Paging controls: Limit caps the page size, Order sets sort direction,
	// and LastKey continues from a previous page's LastKey.
	Limit   int    `url:"limit,omitempty"`
	Order   string `url:"order,omitempty"`
	LastKey string `url:"lastkey,omitempty"`
}

View File

@ -0,0 +1,46 @@
package team
// CreateTeamResponse holds the result data of a CreateTeamRequest.
type CreateTeamResponse struct {
	Id     string `json:"id"`
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// UpdateTeamResponse holds the result data of an UpdateTeamRequest.
type UpdateTeamResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// DeleteTeamResponse holds the result data of a DeleteTeamRequest.
type DeleteTeamResponse struct {
	Status string `json:"status"`
	Code   int    `json:"code"`
}

// GetTeamResponse holds a single team's details.
type GetTeamResponse struct {
	Id      string   `json:"id,omitempty"`
	Name    string   `json:"name,omitempty"`
	Members []Member `json:"members,omitempty"`
}

// ListTeamsResponse holds the result data of a ListTeamsRequest.
type ListTeamsResponse struct {
	Teams []GetTeamResponse `json:"teams,omitempty"`
}
// A single team log entry
type TeamLogEntry struct {
Log string `json:"log"`
Owner string `json:"owner"`
CreatedAt uint `json:"createdAt"`
}
//List team logs response structure
type ListTeamLogsResponse struct {
LastKey string `json:"lastKey,omitempty"`
Logs []TeamLogEntry `json:logs,omitempty`
}

View File

@ -0,0 +1,47 @@
/*
Copyright 2016. All rights reserved.
Use of this source code is governed by a Apache Software
license that can be found in the LICENSE file.
*/
//Package user provides requests and response structures to achieve User API actions.
package user
// CreateUserRequest provides necessary parameter structure for creating a User.
type CreateUserRequest struct {
	APIKey   string `json:"apiKey,omitempty"`
	Username string `json:"username,omitempty"`
	Fullname string `json:"fullname,omitempty"`
	Role     string `json:"role,omitempty"`
	Locale   string `json:"locale,omitempty"`
	Timezone string `json:"timezone,omitempty"`
}

// UpdateUserRequest provides necessary parameter structure for updating a User.
// Note that Username is absent: the update call identifies the user by Id.
type UpdateUserRequest struct {
	Id       string `json:"id,omitempty"`
	APIKey   string `json:"apiKey,omitempty"`
	Fullname string `json:"fullname,omitempty"`
	Role     string `json:"role,omitempty"`
	Locale   string `json:"locale,omitempty"`
	Timezone string `json:"timezone,omitempty"`
}

// DeleteUserRequest provides necessary parameter structure for deleting a User.
// Uses `url` tags — presumably encoded as query-string parameters.
type DeleteUserRequest struct {
	APIKey   string `url:"apiKey,omitempty"`
	Id       string `url:"id,omitempty"`
	Username string `url:"username,omitempty"`
}

// GetUserRequest provides necessary parameter structure for requesting User information.
type GetUserRequest struct {
	APIKey   string `url:"apiKey,omitempty"`
	Id       string `url:"id,omitempty"`
	Username string `url:"username,omitempty"`
}

// ListUsersRequest provides necessary parameter structure for listing Users.
type ListUsersRequest struct {
	APIKey string `url:"apiKey,omitempty"`
}

View File

@ -0,0 +1,46 @@
package user
// Create user response structure
type CreateUserResponse struct {
Id string `json:"id"`
Status string `json:"status"`
Code int `json:"code"`
}
// Update user response structure
type UpdateUserResponse struct {
Status string `json:"status"`
Code int `json:"code"`
}
// Delete user response structure
type DeleteUserResponse struct {
Status string `json:"status"`
Code int `json:"code"`
}
// Participant
type Contact struct {
To string `json:"to,omitempty"`
Method string `json:"method,omitempty"`
}
// Get user structure
type GetUserResponse struct {
Id string `json:"id,omitempty"`
Username string `json:"username,omitempty"`
Fullname string `json:"fullname,omitempty"`
Timezone string `json:"timezone,omitempty"`
Locale string `json:"locale,omitempty"`
State string `json:"state,omitempty"`
Escalations []string `json:"escalations,omitempty"`
Schedules []string `json:"schedules,omitempty"`
Role string `json:"role,omitempty"`
Groups []string `json:"groups,omitempty"`
Contacts []Contact `json:"contacts,omitempty"`
}
// List user response structure
type ListUsersResponse struct {
Users []GetUserResponse `json:"users,omitempty"`
}

251
vendor/vendor.json vendored

File diff suppressed because it is too large Load Diff

View File

@ -41,6 +41,7 @@ body.layout-mysql,
body.layout-newrelic,
body.layout-nomad,
body.layout-openstack,
body.layout-opsgenie,
body.layout-packet,
body.layout-pagerduty,
body.layout-postgresql,

View File

@ -0,0 +1,33 @@
---
layout: "opsgenie"
page_title: "OpsGenie: opsgenie_user"
sidebar_current: "docs-opsgenie-datasource-user"
description: |-
Gets information about a specific user within OpsGenie
---
# opsgenie\_user
Use this data source to get information about a specific user within OpsGenie.
## Example Usage
```
data "opsgenie_user" "me" {
username = "me@cookie-monster.com"
}
resource "opsgenie_team" "test" {
  name = "example"

  member {
    username = "${data.opsgenie_user.me.username}"
    role     = "admin"
  }
}
```
## Argument Reference
The following arguments are supported:
* `username` - (Required) The username (email) to use to find a user in OpsGenie.
## Attributes Reference
* `full_name` - The full name of the found user.
* `role` - The role of the found user.

View File

@ -0,0 +1,45 @@
---
layout: "opsgenie"
page_title: "Provider: OpsGenie"
sidebar_current: "docs-opsgenie-index"
description: |-
The OpsGenie provider is used to interact with the many resources supported by OpsGenie. The provider needs to be configured with the proper credentials before it can be used.
---
# OpsGenie Provider
The OpsGenie provider is used to interact with the
many resources supported by OpsGenie. The provider needs to be configured
with the proper credentials before it can be used.
Use the navigation to the left to read about the available resources.
## Example Usage
```
# Configure the OpsGenie Provider
provider "opsgenie" {
api_key = "key"
}
# Create a user
resource "opsgenie_user" "test" {
...
}
```
## Configuration Reference
The following arguments are supported:
* `api_key` - (Required) The API Key for the OpsGenie Integration. If omitted, the
`OPSGENIE_API_KEY` environment variable is used.
You can generate an API Key within OpsGenie by creating a new API Integration with Read/Write permissions.
## Testing and Development
In order to run the Acceptance Tests for development, the following environment
variables must also be set:
* `OPSGENIE_API_KEY` - The API Key used for the OpsGenie Integration.

View File

@ -0,0 +1,67 @@
---
layout: "opsgenie"
page_title: "OpsGenie: opsgenie_team"
sidebar_current: "docs-opsgenie-resource-team"
description: |-
Manages a Team within OpsGenie.
---
# opsgenie\_team
Manages a Team within OpsGenie.
## Example Usage
```
resource "opsgenie_user" "first" {
username = "user@domain.com"
full_name = "Cookie Monster"
role = "User"
}
resource "opsgenie_user" "second" {
username = "eggman@dr-robotnik.com"
full_name = "Dr Ivo Eggman Robotnik"
role = "User"
}
resource "opsgenie_team" "test" {
name = "example"
member {
username = "${opsgenie_user.first.username}"
role = "admin"
}
member {
username = "${opsgenie_user.second.username}"
role = "user"
}
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) The name associated with this team. OpsGenie defines that this must not be longer than 100 characters.
* `member` - (Optional) A Member block as documented below.
`member` supports the following:
* `username` - (Required) The username for the member to add to this Team.
* `role` - (Required) The role for the user within the Team - can be either 'Admin' or 'User'.
## Attributes Reference
The following attributes are exported:
* `id` - The ID of the OpsGenie Team.
## Import
Teams can be imported using the `id`, e.g.
```
$ terraform import opsgenie_team.team1 812be1a1-32c8-4666-a7fb-03ecc385106c
```

View File

@ -0,0 +1,51 @@
---
layout: "opsgenie"
page_title: "OpsGenie: opsgenie_user"
sidebar_current: "docs-opsgenie-resource-user"
description: |-
Manages a User within OpsGenie.
---
# opsgenie\_user
Manages a User within OpsGenie.
## Example Usage
```
resource "opsgenie_user" "test" {
username = "user@domain.com"
full_name = "Cookie Monster"
role = "User"
locale = "en_US"
timezone = "America/New_York"
}
```
## Argument Reference
The following arguments are supported:
* `username` - (Required) The email address associated with this user. OpsGenie defines that this must not be longer than 100 characters.
* `full_name` - (Required) The Full Name of the User.
* `role` - (Required) The Role assigned to the User. Either a built-in such as 'Owner', 'Admin' or 'User' - or the name of a custom role.
* `locale` - (Optional) Location information for the user. Please look at [Supported Locale Ids](https://www.opsgenie.com/docs/miscellaneous/supported-locales) for available locales - Defaults to "en_US".
* `timezone` - (Optional) Timezone information of the user. Please look at [Supported Timezone Ids](https://www.opsgenie.com/docs/miscellaneous/supported-timezone-ids) for available timezones - Defaults to "America/New_York".
## Attributes Reference
The following attributes are exported:
* `id` - The ID of the OpsGenie User.
## Import
Users can be imported using the `id`, e.g.
```
$ terraform import opsgenie_user.user da4faf16-5546-41e4-8330-4d0002b74048
```

View File

@ -310,6 +310,10 @@
<a href="/docs/providers/openstack/index.html">OpenStack</a>
</li>
<li<%= sidebar_current("docs-providers-opsgenie") %>>
<a href="/docs/providers/opsgenie/index.html">OpsGenie</a>
</li>
<li<%= sidebar_current("docs-providers-packet") %>>
<a href="/docs/providers/packet/index.html">Packet</a>
</li>
@ -321,10 +325,10 @@
<li<%= sidebar_current("docs-providers-postgresql") %>>
<a href="/docs/providers/postgresql/index.html">PostgreSQL</a>
</li>
<li<%= sidebar_current("docs-providers-powerdns") %>>
<a href="/docs/providers/powerdns/index.html">PowerDNS</a>
</li>
<a href="/docs/providers/powerdns/index.html">PowerDNS</a>
</li>
<li<%= sidebar_current("docs-providers-rabbitmq") %>>
<a href="/docs/providers/rabbitmq/index.html">RabbitMQ</a>

Some files were not shown because too many files have changed in this diff Show More