#include <stdio.h>
#include <string.h>

/*
 * Convert the first `len` characters of `binary` (most-significant
 * digit first, each '0' or '1') to an integer.
 *
 * The accumulation uses an unsigned 32-bit value: the original
 * `1 << (len - i - 1)` form left-shifts into the sign bit when
 * len == 32, which is undefined behavior for signed int.  When
 * len == 32 and the top bit is set, the final cast back to int
 * yields the sign-extended (two's-complement) value on conventional
 * targets, which is exactly what the negative-number caller needs.
 *
 * binary: digit string containing at least `len` characters
 * len:    number of digits to consume, 0..32
 */
int binaryToDecimal(const char* binary, int len) {
    unsigned int value = 0;
    for (int i = 0; i < len; i++) {
        /* Shift previous digits up and append the current bit. */
        value = (value << 1) | (unsigned int)(binary[i] == '1');
    }
    return (int)value;
}

/*
 * Read a binary string of up to 32 digits and print its decimal
 * value, interpreting the input as a two's-complement number whose
 * width equals the number of digits entered (leading '1' => negative).
 * Prints "Invalid Input" for empty input, read failure, or any
 * character other than '0'/'1'.
 */
int main(void) {
    char binary[33]; /* up to 32 digits + NUL terminator */

    /* %32s bounds the read, so the buffer cannot overflow; checking
       the return value avoids reading an uninitialized buffer on
       EOF / read failure. */
    if (scanf("%32s", binary) != 1) {
        printf("Invalid Input\n");
        return 0;
    }

    int len = (int)strlen(binary);

    /* Reject empty input and any non-binary character.  A length
       check against 32 is unnecessary: the %32s width above already
       guarantees len <= 32. */
    if (len == 0) {
        printf("Invalid Input\n");
        return 0;
    }
    for (int i = 0; i < len; i++) {
        if (binary[i] != '0' && binary[i] != '1') {
            printf("Invalid Input\n");
            return 0;
        }
    }

    if (binary[0] == '0') {
        /* Leading 0: plain non-negative value. */
        printf("%d\n", binaryToDecimal(binary, len));
    } else {
        /* Leading 1: negative len-bit two's-complement number,
           value = magnitude - 2^len.  `1 << len` in int is undefined
           for len >= 31, so do the subtraction in 64-bit arithmetic.
           For len == 32 the conversion performed inside
           binaryToDecimal already produces the sign-extended result,
           and 2^len would not even fit the shift, so use it directly. */
        long long value;
        if (len == 32) {
            value = binaryToDecimal(binary, len);
        } else {
            value = (long long)binaryToDecimal(binary, len) - (1LL << len);
        }
        /* Result always fits in int: a negative len-bit value lies in
           [-2^(len-1), -1], and len <= 32. */
        printf("%d\n", (int)value);
    }

    return 0;
}